Merge tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "Core:
   - shared fencing staging removal
   - drop transactional atomic helpers and move helpers to new location
   - DP/MST atomic cleanup
   - Leasing cleanups and drop EXPORT_SYMBOL
   - Convert drivers to atomic helpers and generic fbdev.
   - removed deprecated obj_ref/unref in favour of get/put
   - Improve dumb callback documentation
   - MODESET_LOCK_BEGIN/END helpers

  panels:
   - CDTech panels, Banana Pi Panel, DLC1010GIG,
   - Olimex LCD-O-LinuXino, Samsung S6D16D0, Truly NT35597 WQXGA,
   - Himax HX8357D, simulated RTSM AEMv8.
   - GPD Win2 panel
   - AUO G101EVN010

  vgem:
   - render node support

  ttm:
   - move global init out of drivers
   - fix LRU handling for ghost objects
   - Support for simultaneous submissions to multiple engines

  scheduler:
   - timeout/fault handling changes to help GPU recovery
   - helpers for hw with preemption support

  i915:
   - Scaler/Watermark fixes
   - DP MST + powerwell fixes
   - PSR fixes
   - Break long get/put shmemfs pages
   - Icelake fixes
   - Icelake DSI video mode enablement
   - Engine workaround improvements

  amdgpu:
   - freesync support
   - GPU reset enabled on CI, VI, SOC15 dGPUs
   - ABM support in DC
   - KFD support for vega12/polaris12
   - SDMA paging queue on vega
   - More amdkfd code sharing
   - DCC scanout on GFX9
   - DC kerneldoc
   - Updated SMU firmware for GFX8 chips
   - XGMI PSP + hive reset support
   - GPU reset
   - DC trace support
   - Powerplay updates for newer Polaris
   - Cursor plane update fast path
   - kfd dma-buf support

  virtio-gpu:
   - add EDID support

  vmwgfx:
   - pageflip with damage support

  nouveau:
   - Initial Turing TU104/TU106 modesetting support

  msm:
   - a2xx gpu support for apq8060 and imx5
   - a2xx gpummu support
   - mdp4 display support for apq8060
   - DPU fixes and cleanups
   - enhanced profiling support
   - debug object naming interface
   - get_iova/page pinning decoupling

  tegra:
   - Tegra194 host1x, VIC and display support enabled
   - Audio over HDMI for Tegra186 and Tegra194

  exynos:
   - DMA/IOMMU refactoring
   - plane alpha + blend mode support
   - Color format fixes for mixer driver

  rcar-du:
   - R8A7744 and R8A77470 support
   - R8A77965 LVDS support

  imx:
   - fbdev emulation fix
   - multi-tiled scaling fixes
   - SPDX identifiers

  rockchip:
   - dw_hdmi support
   - dw-mipi-dsi + dual dsi support
   - mailbox read size fix

  qxl:
   - fix cursor pinning

  vc4:
   - YUV support (scaling + cursor)

  v3d:
   - enable TFU (Texture Formatting Unit)

  mali-dp:
   - add support for linear tiled formats

  sun4i:
   - Display Engine 3 support
   - H6 DE3 mixer 0 support
   - H6 display engine support
   - dw-hdmi support
   - H6 HDMI phy support
   - implicit fence waiting
   - BGRX8888 support

  meson:
   - Overlay plane support
   - implicit fence waiting
   - HDMI 1.4 4k modes

  bridge:
   - i2c fixes for sii902x"

* tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm: (1403 commits)
  drm/amd/display: Add fast path for cursor plane updates
  drm/amdgpu: Enable GPU recovery by default for CI
  drm/amd/display: Fix duplicating scaling/underscan connector state
  drm/amd/display: Fix uninitialized max_bpc state values
  Revert "drm/amd/display: Set RMX_ASPECT as default"
  drm/amdgpu: Fix stub function name
  drm/msm/dpu: Fix clock issue after bind failure
  drm/msm/dpu: Clean up dpu_media_info.h static inline functions
  drm/msm/dpu: Further cleanups for static inline functions
  drm/msm/dpu: Cleanup the debugfs functions
  drm/msm/dpu: Remove dpu_irq and unused functions
  drm/msm: Make irq_postinstall optional
  drm/msm/dpu: Cleanup callers of dpu_hw_blk_init
  drm/msm/dpu: Remove unused functions
  drm/msm/dpu: Remove dpu_crtc_is_enabled()
  drm/msm/dpu: Remove dpu_crtc_get_mixer_height
  drm/msm/dpu: Remove dpu_dbg
  drm/msm: dpu: Remove crtc_lock
  drm/msm: dpu: Remove vblank_requested flag from dpu_crtc
  drm/msm: dpu: Separate crtc assignment from vblank enable
  ...
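
The MODESET_LOCK_BEGIN/END helpers noted under "Core" above wrap the
acquire-context retry dance in a pair of macros. A sketch of the intended
usage, assuming the macro arguments as merged here (the foo_* function and
its body are hypothetical):

	#include <drm/drm_modeset_lock.h>

	static int foo_operate_on_state(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		/* Replaces the open-coded retry loop around drm_modeset_lock_all_ctx(). */
		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

		/* ... touch modeset state while all the locks are held ... */

		DRM_MODESET_LOCK_ALL_END(ctx, ret);

		return ret;
	}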
diff --git a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
index 87dfb33..b9d5337 100644
--- a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
+++ b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
@@ -1,11 +1,14 @@
-Device tree bindings for Allwinner A64 DE2 bus
+Device tree bindings for Allwinner DE2/3 bus
 
 The Allwinner A64 DE2 is on a special bus, which needs a SRAM region (SRAM C)
-to be claimed for enabling the access.
+to be claimed for enabling the access. The DE3 on Allwinner H6 is in the same
+situation, and this binding also applies.
 
 Required properties:
 
- - compatible:		Should contain "allwinner,sun50i-a64-de2"
+ - compatible:		Should be one of:
+				- "allwinner,sun50i-a64-de2"
+				- "allwinner,sun50i-h6-de3", "allwinner,sun50i-a64-de2"
  - reg:			A resource specifier for the register space
  - #address-cells:	Must be set to 1
  - #size-cells:		Must be set to 1
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
index 057b813..c65fd7a 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -67,6 +67,8 @@
 Optional properties:
 - power-domains: Optional phandle to associated power domain as described in
 	the file ../power/power_domain.txt
+- amlogic,canvas: phandle to canvas provider node as described in the file
+	../soc/amlogic/amlogic,canvas.txt
 
 Required nodes:
 
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index 3aeb0ec..ba5469d 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -13,6 +13,7 @@
   - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
   - "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders
   - "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders
+  - "renesas,r8a77965-lvds" for R8A77965 (R-Car M3-N) compatible LVDS encoders
   - "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders
   - "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders
   - "renesas,r8a77990-lvds" for R8A77990 (R-Car E3) compatible LVDS encoders
diff --git a/Documentation/devicetree/bindings/display/himax,hx8357d.txt b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
new file mode 100644
index 0000000..e641f66
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
@@ -0,0 +1,26 @@
+Himax HX8357D display panels
+
+This binding is for display panels using a Himax HX8357D controller in SPI
+mode, such as the Adafruit 3.5" TFT for Raspberry Pi.
+
+Required properties:
+- compatible:	"adafruit,yx350hv15", "himax,hx8357d"
+- dc-gpios:	D/C pin
+- reg:		address of the panel on the SPI bus
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation:	panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight:	phandle of the backlight device attached to the panel
+
+Example:
+	display@0 {
+		compatible = "adafruit,yx350hv15", "himax,hx8357d";
+		reg = <0>;
+		spi-max-frequency = <32000000>;
+		dc-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+		rotation = <90>;
+		backlight = <&backlight>;
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index dfc7432..9ae9469 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -106,6 +106,7 @@
 - clocks: Phandles to device clocks. See [1] for details on clock bindings.
 - clock-names: the following clocks are required:
   * "iface"
+  * "ref" (only required for new DTS files/entries)
   For 28nm HPM/LP, 28nm 8960 PHYs:
 - vddio-supply: phandle to vdd-io regulator device node
   For 20nm PHY:
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 43fac0f..ac8df3b 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -1,11 +1,13 @@
 Qualcomm adreno/snapdragon GPU
 
 Required properties:
-- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno" or
+	      "amd,imageon-XYZ.W", "amd,imageon"
     for example: "qcom,adreno-306.0", "qcom,adreno"
   Note that you need to list the less specific "qcom,adreno" (since this
   is what the device is matched on), in addition to the more specific
   with the chip-id.
+  If "amd,imageon" is used, there should be no top level msm device.
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the gpu.
 - clocks: device clocks
diff --git a/Documentation/devicetree/bindings/display/msm/mdp4.txt b/Documentation/devicetree/bindings/display/msm/mdp4.txt
index 3c341a1..b07eeb3 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp4.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp4.txt
@@ -38,6 +38,8 @@
 Optional properties:
 - clock-names: the following clocks are optional:
   * "lut_clk"
+- qcom,lcdc-align-lsb: Boolean value indicating that LSB alignment should be
+  used for LCDC. This is only valid for 18bpp panels.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010 b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
new file mode 100644
index 0000000..bc6a0c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
@@ -0,0 +1,12 @@
+AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g101evn010"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
new file mode 100644
index 0000000..35bc0c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
@@ -0,0 +1,12 @@
+Banana Pi 7" (S070WV20-CT16) TFT LCD Panel
+
+Required properties:
+- compatible: should be "bananapi,s070wv20-ct16"
+- power-supply: see ./panel-common.txt
+
+Optional properties:
+- enable-gpios: see ./simple-panel.txt
+- backlight: see ./simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in ./simple-panel.txt.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
new file mode 100644
index 0000000..057f7f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s043wq26h-ct7"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
new file mode 100644
index 0000000..505615d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s070wv95-ct16"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
new file mode 100644
index 0000000..fbf5dcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
@@ -0,0 +1,12 @@
+DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+
+Required properties:
+- compatible: should be "dlc,dlc1010gig"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
new file mode 100644
index 0000000..a89f9c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
@@ -0,0 +1,42 @@
+Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
+
+This device can be used as a bridge between a host controller and LCD panels.
+Currently supported LCDs are:
+  - LCD-OLinuXino-4.3TS
+  - LCD-OLinuXino-5
+  - LCD-OLinuXino-7
+  - LCD-OLinuXino-10
+
+The panel itself contains:
+  - AT24C16C EEPROM holding panel identification and timing requirements
+  - AR1021 resistive touch screen controller (optional)
+  - FT5x6 capacitive touch screen controller (optional)
+  - GT911/GT928 capacitive touch screen controller (optional)
+
+The above chips share the same I2C bus. The EEPROM is factory preprogrammed with
+device information (id, serial, etc.) and timing requirements.
+
+Touchscreen bindings can be found in these files:
+  - input/touchscreen/goodix.txt
+  - input/touchscreen/edt-ft5x06.txt
+  - input/touchscreen/ar1021.txt
+
+Required properties:
+  - compatible: should be "olimex,lcd-olinuxino"
+  - reg: address of the configuration EEPROM, should be <0x50>
+  - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+  - enable-gpios: GPIO pin to enable or disable the panel
+  - backlight: phandle of the backlight device attached to the panel
+
+Example:
+&i2c2 {
+	panel@50 {
+		compatible = "olimex,lcd-olinuxino";
+		reg = <0x50>;
+		power-supply = <&reg_vcc5v0>;
+		enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
+		backlight = <&backlight>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
new file mode 100644
index 0000000..b94e366
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
@@ -0,0 +1,30 @@
+Samsung S6D16D0 4" 864x480 AMOLED panel
+
+Required properties:
+  - compatible: should be:
+    "samsung,s6d16d0",
+  - reg: the virtual channel number of a DSI peripheral
+  - vdd1-supply: I/O voltage supply
+  - reset-gpios: a GPIO spec for the reset pin (active low)
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
+
+Example:
+&dsi {
+	...
+
+	panel@0 {
+		compatible = "samsung,s6d16d0";
+		reg = <0>;
+		vdd1-supply = <&foo>;
+		reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&dsi_out>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
index f5e3c6f..40f3d7c 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
@@ -1,47 +1,70 @@
 TPO TPG110 Panel
 ================
 
-This binding builds on the DPI bindings, adding a few properties
-as a superset of a DPI. See panel-dpi.txt for the required DPI
-bindings.
+This panel driver is a component that acts as an intermediary
+between an RGB output and a variety of panels. The panel
+driver is strapped in hardware to the desired resolution
+and other properties, and has a control interface over 3WIRE
+SPI. By talking to the TPG110 over SPI, the strapped properties
+can be discovered and the hardware is therefore mostly
+self-describing.
+
+       +--------+
+SPI -> |  TPO   | -> physical display
+RGB -> | TPG110 |
+       +--------+
+
+If some electrical strap or alternate resolution is desired,
+this can be set up by taking software control of the display
+over the SPI interface. The interface can also adjust
+for properties of the display such as gamma correction and
+certain electrical driving levels.
+
+The TPG110 does not know the physical dimensions of the panel
+connected, so this needs to be specified in the device tree.
+
+It requires a GPIO line for control of its reset line.
+
+The serial protocol has line names that resemble I2C but the
+protocol is not I2C but 3WIRE SPI.
 
 Required properties:
-- compatible : "tpo,tpg110"
+- compatible : one of:
+  "ste,nomadik-nhk15-display", "tpo,tpg110"
+  "tpo,tpg110"
 - grestb-gpios : panel reset GPIO
-- scen-gpios : serial control enable GPIO
-- scl-gpios : serial control clock line GPIO
-- sda-gpios : serial control data line GPIO
+- width-mm : see display/panel/panel-common.txt
+- height-mm : see display/panel/panel-common.txt
 
-Required nodes:
-- Video port for DPI input, see panel-dpi.txt
-- Panel timing for DPI setup, see panel-dpi.txt
+The device needs to be a child of an SPI bus, see
+spi/spi-bus.txt. The SPI child must set the following
+properties:
+- spi-3wire
+- spi-max-frequency = <3000000>;
+as these are characteristics of this device.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
 
 Example
 -------
 
-panel {
-	compatible = "tpo,tpg110", "panel-dpi";
-	grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
-	scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
-	scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
-	sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+panel: display@0 {
+	compatible = "tpo,tpg110";
+	reg = <0>;
+	spi-3wire;
+	/* 320 ns min period ~= 3 MHz */
+	spi-max-frequency = <3000000>;
+	/* Width and height from data sheet */
+	width-mm = <116>;
+	height-mm = <87>;
+	grestb-gpios = <&foo_gpio 5 GPIO_ACTIVE_LOW>;
 	backlight = <&bl>;
 
 	port {
 		nomadik_clcd_panel: endpoint {
-			remote-endpoint = <&nomadik_clcd_pads>;
+			remote-endpoint = <&foo>;
 		};
 	};
-
-	panel-timing {
-		clock-frequency = <33200000>;
-		hactive = <800>;
-		hback-porch = <216>;
-		hfront-porch = <40>;
-		hsync-len = <1>;
-		vactive = <480>;
-		vback-porch = <35>;
-		vfront-porch = <10>;
-		vsync-len = <1>;
-	};
 };
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 9de67be..3c855d9 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -4,7 +4,9 @@
 
   - compatible: must be one of the following.
     - "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
+    - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
+    - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -52,7 +54,9 @@
                         Port0          Port1          Port2          Port3
 -----------------------------------------------------------------------------
  R8A7743 (RZ/G1M)       DPAD 0         LVDS 0         -              -
+ R8A7744 (RZ/G1N)       DPAD 0         LVDS 0         -              -
  R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
+ R8A77470 (RZ/G1C)      DPAD 0         DPAD 1         LVDS 0         -
  R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
  R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
  R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index adc94fc..3914342 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -13,6 +13,7 @@
 
 - compatible: should be one of the following:
 		"rockchip,rk3288-dw-hdmi"
+		"rockchip,rk3328-dw-hdmi"
 		"rockchip,rk3399-dw-hdmi"
 - reg: See dw_hdmi.txt.
 - reg-io-width: See dw_hdmi.txt. Shall be 4.
@@ -34,6 +35,8 @@
 - clock-names: May contain "cec" as defined in dw_hdmi.txt.
 - clock-names: May contain "grf", power for grf io.
 - clock-names: May contain "vpll", external clock for some hdmi phy.
+- phys: from general PHY binding: the phandle for the PHY device.
+- phy-names: Should be "hdmi" if phys references an external phy.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 7854fff..f426bdb 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -79,6 +79,7 @@
   - compatible: value must be one of:
     * "allwinner,sun8i-a83t-dw-hdmi"
     * "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"
+    * "allwinner,sun50i-h6-dw-hdmi"
   - reg: base address and size of memory-mapped region
   - reg-io-width: See dw_hdmi.txt. Shall be 1.
   - interrupts: HDMI interrupt number
@@ -86,9 +87,14 @@
     * iahb: the HDMI bus clock
     * isfr: the HDMI register clock
     * tmds: TMDS clock
+    * cec: HDMI CEC clock (H6 only)
+    * hdcp: HDCP clock (H6 only)
+    * hdcp-bus: HDCP bus clock (H6 only)
   - clock-names: the clock names mentioned above
-  - resets: phandle to the reset controller
-  - reset-names: must be "ctrl"
+  - resets:
+    * ctrl: HDMI controller reset
+    * hdcp: HDCP reset (H6 only)
+  - reset-names: reset names mentioned above
   - phys: phandle to the DWC HDMI PHY
   - phy-names: must be "phy"
 
@@ -109,6 +115,7 @@
     * allwinner,sun8i-h3-hdmi-phy
     * allwinner,sun8i-r40-hdmi-phy
     * allwinner,sun50i-a64-hdmi-phy
+    * allwinner,sun50i-h6-hdmi-phy
   - reg: base address and size of memory-mapped region
   - clocks: phandles to the clocks feeding the HDMI PHY
     * bus: the HDMI PHY interface clock
@@ -158,6 +165,7 @@
    * allwinner,sun9i-a80-tcon-tv
    * "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd"
    * "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv"
+   * "allwinner,sun50i-h6-tcon-tv", "allwinner,sun8i-r40-tcon-tv"
  - reg: base address and size of memory-mapped region
  - interrupts: interrupt associated to this IP
  - clocks: phandles to the clocks feeding the TCON.
@@ -220,24 +228,26 @@
                  \ [3] TCON-TV1 [1] - TVE1/RGB
 
 Note that both TCON TOP references same physical unit. Both mixers can be
-connected to any TCON.
+connected to any TCON. Not all TCON TOP variants support all features.
 
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun8i-r40-tcon-top
+    * allwinner,sun50i-h6-tcon-top
   - reg: base address and size of the memory-mapped region.
   - clocks: phandle to the clocks feeding the TCON TOP
     * bus: TCON TOP interface clock
     * tcon-tv0: TCON TV0 clock
-    * tve0: TVE0 clock
-    * tcon-tv1: TCON TV1 clock
-    * tve1: TVE0 clock
-    * dsi: MIPI DSI clock
+    * tve0: TVE0 clock (R40 only)
+    * tcon-tv1: TCON TV1 clock (R40 only)
+    * tve1: TVE1 clock (R40 only)
+    * dsi: MIPI DSI clock (R40 only)
   - clock-names: clock name mentioned above
   - resets: phandle to the reset line driving the TCON TOP
   - #clock-cells : must contain 1
   - clock-output-names: Names of clocks created for TCON TV0 channel clock,
-    TCON TV1 channel clock and DSI channel clock, in that order.
+    TCON TV1 channel clock (R40 only) and DSI channel clock (R40 only), in
+    that order.
 
 - ports: A ports node with endpoint definitions as defined in
     Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
@@ -381,6 +391,7 @@
     * allwinner,sun8i-v3s-de2-mixer
     * allwinner,sun50i-a64-de2-mixer-0
     * allwinner,sun50i-a64-de2-mixer-1
+    * allwinner,sun50i-h6-de3-mixer-0
   - reg: base address and size of the memory-mapped region.
   - clocks: phandles to the clocks feeding the mixer
     * bus: the mixer interface clock
@@ -415,9 +426,10 @@
     * allwinner,sun8i-v3s-display-engine
     * allwinner,sun9i-a80-display-engine
     * allwinner,sun50i-a64-display-engine
+    * allwinner,sun50i-h6-display-engine
 
   - allwinner,pipelines: list of phandle to the display engine
-    frontends (DE 1.0) or mixers (DE 2.0) available.
+    frontends (DE 1.0) or mixers (DE 2.0/3.0) available.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/truly,nt35597.txt b/Documentation/devicetree/bindings/display/truly,nt35597.txt
new file mode 100644
index 0000000..f39c77e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/truly,nt35597.txt
@@ -0,0 +1,59 @@
+Truly model NT35597 DSI display driver
+
+The Truly NT35597 is a generic display driver, currently only configured
+for use in the 2K display on the Qualcomm SDM845 MTP board.
+
+Required properties:
+- compatible: should be "truly,nt35597-2K-display"
+- vdda-supply: phandle of the regulator that provides the supply voltage
+  for the power IC
+- vdispp-supply: phandle of the regulator that provides the supply voltage
+  for positive LCD bias
+- vdispn-supply: phandle of the regulator that provides the supply voltage
+  for negative LCD bias
+- reset-gpios: phandle of the gpio for the reset line (active low).
+  The drive strength should be 8mA; the gpio can be configured using mux,
+  pinctrl and pinctrl-names.
+- mode-gpios: phandle of the gpio for choosing the mode of the display,
+  single DSI or dual DSI.
+  This should be low for dual DSI and high for single DSI mode.
+- ports: This device has two video ports driven by two DSIs. Their connections
+  are modeled using the OF graph bindings specified in
+  Documentation/devicetree/bindings/graph.txt.
+  - port@0: DSI input port driven by master DSI
+  - port@1: DSI input port driven by secondary DSI
+
+Example:
+
+	dsi@ae94000 {
+		panel@0 {
+			compatible = "truly,nt35597-2K-display";
+			reg = <0>;
+			vdda-supply = <&pm8998_l14>;
+			vdispp-supply = <&lab_regulator>;
+			vdispn-supply = <&ibb_regulator>;
+			pinctrl-names = "default", "suspend";
+			pinctrl-0 = <&dpu_dsi_active>;
+			pinctrl-1 = <&dpu_dsi_suspend>;
+
+			reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+			mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					panel0_in: endpoint {
+						remote-endpoint = <&dsi0_out>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					panel1_in: endpoint {
+						remote-endpoint = <&dsi1_out>;
+					};
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4b1a2a8..a2f4451 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -67,6 +67,7 @@
 cascoda	Cascoda, Ltd.
 cavium	Cavium, Inc.
 cdns	Cadence Design Systems Inc.
+cdtech	CDTech(H.K.) Electronics Limited
 ceva	Ceva, Inc.
 chipidea	Chipidea, Inc
 chipone		ChipOne
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
new file mode 100644
index 0000000..cc89b0f
--- /dev/null
+++ b/Documentation/gpu/amdgpu-dc.rst
@@ -0,0 +1,68 @@
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+*placeholder - general description of supported platforms, what dc is, etc.*
+
+Because it is partially shared with other operating systems, the Display Core
+Driver is divided into two pieces.
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+   hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+   amdgpu base driver and DRM are implemented here.
+
+It doesn't help that the entire package is frequently referred to as DC. But
+with the context in mind, it should be clear.
+
+When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
+supported ASICs. To force disable, set `amdgpu.dc=0` on the kernel command line.
+Likewise, to force enable on unsupported ASICs, set `amdgpu.dc=1`.
+
+To determine if DC is loaded, search dmesg for the following entry:
+
+``Display Core initialized with <version number here>``
+
+AMDgpu Display Manager
+======================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+   :internal:
+
+Lifecycle
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
+
+Display Core
+============
+
+**WIP**
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 7d2d387..7c16721 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -5,6 +5,7 @@
 .. toctree::
 
    amdgpu
+   amdgpu-dc
    i915
    meson
    pl111
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index f9cfcdcd..b422eb8 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -59,12 +59,6 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    :doc: implementing nonblocking commit
 
-Atomic State Reset and Initialization
--------------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
-   :doc: atomic state reset and initialization
-
 Helper Functions Reference
 --------------------------
 
@@ -74,6 +68,21 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    :export:
 
+Atomic State Reset and Initialization
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
+   :doc: atomic state reset and initialization
+
+Atomic State Helper Reference
+-----------------------------
+
+.. kernel-doc:: include/drm/drm_atomic_state_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
+   :export:
+
 Simple KMS Helper Reference
 ===========================
 
@@ -223,6 +232,18 @@
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :export:
+
 Output Probing Helper Functions Reference
 =========================================
 
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 4b1501b..75c882e 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -554,6 +554,18 @@
 .. kernel-doc:: drivers/gpu/drm/drm_blend.c
    :export:
 
+FB_DAMAGE_CLIPS
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+   :internal:
+
 Color Management Properties
 ---------------------------
 
@@ -575,6 +587,13 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
    :doc: explicit fencing properties
 
+
+Variable Refresh Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: Variable refresh properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index e725e84..54a696d 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -72,16 +72,13 @@
 the kernel itself. The type of this object should be
 TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
 ttm_bo_global). Again, driver-specific init and release functions may
-be provided, likely eventually calling ttm_bo_global_init() and
-ttm_bo_global_release(), respectively. Also, like the previous
+be provided, likely eventually calling ttm_bo_global_ref_init() and
+ttm_bo_global_ref_release(), respectively. Also, like the previous
 object, ttm_global_item_ref() is used to create an initial reference
 count for the TTM, which will call your initialization function.
 
 See the radeon_ttm.c file for an example of usage.
 
-.. kernel-doc:: drivers/gpu/drm/drm_global.c
-   :export:
-
 
 The Graphics Execution Manager (GEM)
 ====================================
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index a2214cc..4b4bf2c 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -197,6 +197,9 @@
         difference between EACCESS and EPERM.
 
 ENODEV:
+        The device is not (yet) present or fully initialized.
+
+EOPNOTSUPP:
         Feature (like PRIME, modesetting, GEM) is not supported by the driver.
 
 ENXIO:
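
A sketch of how a driver entry point might apply the ENODEV/EOPNOTSUPP
distinction documented above; foo_has_feature() is a hypothetical
capability check:

	static int foo_ioctl_prologue(struct drm_device *dev)
	{
		if (!dev->registered)
			return -ENODEV;		/* device not (yet) fully initialized */

		if (!foo_has_feature(dev))
			return -EOPNOTSUPP;	/* feature not supported by the driver */

		return 0;
	}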
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 77c2b3c..14191b6 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -28,22 +28,16 @@
 
 Contact: Daniel Vetter, Thierry Reding, respective driver maintainers
 
-Switch from reference/unreference to get/put
---------------------------------------------
 
-For some reason DRM core uses ``reference``/``unreference`` suffixes for
-refcounting functions, but kernel uses ``get``/``put`` (e.g.
-``kref_get``/``put()``). It would be good to switch over for consistency, and
-it's shorter. Needs to be done in 3 steps for each pair of functions:
+Remove custom dumb_map_offset implementations
+---------------------------------------------
 
-* Create new ``get``/``put`` functions, define the old names as compatibility
-  wrappers
-* Switch over each file/driver using a cocci-generated spatch.
-* Once all users of the old names are gone, remove them.
+All GEM based drivers should be using drm_gem_create_mmap_offset() instead.
+Audit each individual driver, make sure it'll work with the generic
+implementation (there's lots of outdated locking leftovers in various
+implementations), and then remove it.
 
-This way drivers/patches in the progress of getting merged won't break.
-
-Contact: Daniel Vetter
+Contact: Daniel Vetter, respective driver maintainers
 
 Convert existing KMS drivers to atomic modesetting
 --------------------------------------------------
@@ -234,6 +228,34 @@
 
 Contact: Daniel Vetter
 
+Defaults for .gem_prime_import and export
+-----------------------------------------
+
+Most drivers don't need to set drm_driver->gem_prime_import and
+->gem_prime_export now that drm_gem_prime_import() and drm_gem_prime_export()
+are the default.
+
+struct drm_gem_object_funcs
+---------------------------
+
+GEM objects can now have a function table instead of having the callbacks on the
+DRM driver struct. This is now the preferred way and drivers can be moved over.
+
+Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
+---------------------------------------------------------
+
+For cases where drivers are attempting to grab the modeset locks with a local
+acquire context, replace the boilerplate code surrounding
+drm_modeset_lock_all_ctx() with DRM_MODESET_LOCK_ALL_BEGIN() and
+DRM_MODESET_LOCK_ALL_END().
+
+This should also be done for all places where drm_modeset_lock_all() is still
+used.
+
+As a reference, take a look at the conversions already completed in drm core.
+
+Contact: Sean Paul, respective driver maintainers
+
 Core refactorings
 =================
 
@@ -339,6 +361,16 @@
   leftovers from older (never merged into upstream) KMS designs where modes
   where set using their ID, including support to add/remove modes.
 
+- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
+  function tables in drivers, but before we can remove them we need to make sure
+  that all the users in helpers and drivers do correctly check for a NULL
+  vtable.
+
+- Clean up the various ->destroy callbacks. A lot of them just wrap the
+  drm_*_cleanup implementations and can be removed. Some tack a kfree() at the
+  end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
+  historical reasons) misnamed drm_primary_helper_destroy() function.
+
 Better Testing
 ==============
 
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 0a6ea62..7dfc349 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -10,8 +10,8 @@
 TODO
 ====
 
-CRC API
--------
+CRC API Improvements
+--------------------
 
 - Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
 
@@ -22,3 +22,100 @@
 
 - Add igt test to check extreme alpha values i.e. fully opaque and fully
   transparent (intermediate values are affected by hw-specific rounding modes).
+
+Vblank issues
+-------------
+
+Some IGT test cases are failing. Need to analyze why and fix the issues:
+
+- plain-flip-fb-recreate
+- plain-flip-ts-check
+- flip-vs-blocking-wf-vblank
+- plain-flip-fb-recreate-interruptible
+- flip-vs-wf_vblank-interruptible
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure vkms instance without having to reload the
+module. Use/Test-cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
+  compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+  them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+  the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs.  All existing module options should be supported through configfs too.
+
+Add Plane Features
+------------------
+
+There's lots of plane features we could add support for:
+
+- Real overlay planes, not just cursor.
+
+- Full alpha blending on all planes.
+
+- Rotation, scaling.
+
+- Additional buffer formats, especially YUV formats for video like NV12.
+  Low/high bpp RGB formats would also be interesting.
+
+- Async updates (currently only possible on cursor plane using the legacy cursor
+  api).
+
+For all of these, we also want to review the igt test coverage and make sure all
+relevant igt testcases work on vkms.
+
+Writeback support
+-----------------
+
+Currently vkms only computes a CRC for each frame. Once we have additional plane
+features, we could write back the entire composited frame, and expose it as:
+
+- Writeback connector. This is useful for testing compositors if you don't have
+  hardware with writeback support.
+
+- As a v4l device. This is useful for debugging compositors on special vkms
+  configurations, so that developers see what's really going on.
+
+Prime Buffer Sharing
+--------------------
+
+We already have vgem, which is a gem driver for testing rendering, similar to
+how vkms is for testing the modeset side. Adding buffer sharing support to vkms
+allows us to test them together, to test synchronization and lots of other
+features. Also, this allows compositors to test whether they work correctly on
+SoC chips, where the display and rendering are very often split between 2
+drivers.
+
+Output Features
+---------------
+
+- Variable refresh rate/freesync support. This probably needs prime buffer
+  sharing support, so that we can use vgem fences to simulate rendering in
+  testing. Also needs support to specify the EDID.
+
+- Add support for link status, so that compositors can validate their runtime
+  fallbacks when e.g. a Display Port link goes bad.
+
+- All the hotplug handling described under "Runtime Configuration".
+
+Atomic Check using eBPF
+-----------------------
+
+Atomic drivers have lots of restrictions which are not exposed to userspace in
+any explicit form through e.g. possible property values. Userspace can only
+inquire about these limits through the atomic IOCTL, possibly using the
+TEST_ONLY flag. Trying to add configurable code for all these limits, to allow
+compositors to be tested against them, would be a rather futile exercise. Instead
+we could add support for eBPF to validate any kind of atomic state, and
+implement a library of different restrictions.
+
+This needs a bunch of features (plane compositing, multiple outputs, ...)
+enabled already to make sense.
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index fdd84cb..b8e29f9 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -143,7 +143,7 @@
 	Query the address space, and return true if it is completely
 	unevictable.
 
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
 
  (1) By ramfs to mark the address spaces of its inodes when they are created,
      and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
 
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+     amount of unevictable memory marked by the i915 driver is roughly the bounded
+     object size in debugfs/dri/0/i915_gem_objects.
+
 
 Detecting Unevictable Pages
 ---------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index f3a5c97..6df3506d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4737,6 +4737,13 @@
 F:	drivers/gpu/drm/tinydrm/ili9225.c
 F:	Documentation/devicetree/bindings/display/ilitek,ili9225.txt
 
+DRM DRIVER FOR HX8357D PANELS
+M:	Eric Anholt <eric@anholt.net>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+S:	Maintained
+F:	drivers/gpu/drm/tinydrm/hx8357d.c
+F:	Documentation/devicetree/bindings/display/himax,hx8357d.txt
+
 DRM DRIVER FOR INTEL I810 VIDEO CARDS
 S:	Orphan / Obsolete
 F:	drivers/gpu/drm/i810/
@@ -4778,6 +4785,12 @@
 F:	drivers/gpu/drm/nouveau/
 F:	include/uapi/drm/nouveau_drm.h
 
+DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
+M:	Stefan Mavrodiev <stefan@olimex.com>
+S:	Maintained
+F:	drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+F:	Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
+
 DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
 M:	Noralf Trønnes <noralf@tronnes.org>
 S:	Maintained
@@ -4843,10 +4856,8 @@
 
 DRM DRIVER FOR VMWARE VIRTUAL GPU
 M:	"VMware Graphics" <linux-graphics-maintainer@vmware.com>
-M:	Sinclair Yeh <syeh@vmware.com>
 M:	Thomas Hellstrom <thellstrom@vmware.com>
 L:	dri-devel@lists.freedesktop.org
-T:	git git://people.freedesktop.org/~syeh/repos_linux
 T:	git git://people.freedesktop.org/~thomash/linux
 S:	Supported
 F:	drivers/gpu/drm/vmwgfx/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7d..136ec04 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -30,13 +30,16 @@
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
+
 /*
  * fence context counter: each execution context should have its own
  * fence context, this allows checking if fences belong to the same
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
 
 /**
  * DOC: DMA fences overview
@@ -68,6 +71,37 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
  *   &dma_buf.resv pointer.
  */
 
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+        return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+	.get_driver_name = dma_fence_stub_get_name,
+	.get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+	spin_lock(&dma_fence_stub_lock);
+	if (!dma_fence_stub.ops) {
+		dma_fence_init(&dma_fence_stub,
+			       &dma_fence_stub_ops,
+			       &dma_fence_stub_lock,
+			       0, 0);
+		dma_fence_signal_locked(&dma_fence_stub);
+	}
+	spin_unlock(&dma_fence_stub_lock);
+
+	return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
 /**
  * dma_fence_context_alloc - allocate an array of fence contexts
  * @num: amount of contexts to allocate
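
A minimal sketch of a caller of the dma_fence_get_stub() export added above;
the surrounding helper is hypothetical:

	#include <linux/dma-fence.h>

	/*
	 * When there is nothing to wait for, hand back the shared stub
	 * fence instead of allocating one: it is initialized on first
	 * use, already signaled, and returned with a reference held.
	 */
	static struct dma_fence *foo_out_fence(bool work_pending)
	{
		if (!work_pending)
			return dma_fence_get_stub();

		/* ... otherwise build and return a real fence ... */
		return NULL;
	}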
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 6c95f61a..c161833 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -56,9 +56,10 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
 /**
- * reservation_object_reserve_shared - Reserve space to add a shared
- * fence to a reservation_object.
+ * reservation_object_reserve_shared - Reserve space to add shared fences to
+ * a reservation_object.
  * @obj: reservation object
+ * @num_fences: number of fences we want to add
  *
  * Should be called before reservation_object_add_shared_fence().  Must
  * be called with obj->lock held.
@@ -66,107 +67,27 @@ EXPORT_SYMBOL(reservation_seqcount_string);
  * RETURNS
  * Zero for success, or -errno
  */
-int reservation_object_reserve_shared(struct reservation_object *obj)
+int reservation_object_reserve_shared(struct reservation_object *obj,
+				      unsigned int num_fences)
 {
-	struct reservation_object_list *fobj, *old;
-	u32 max;
+	struct reservation_object_list *old, *new;
+	unsigned int i, j, k, max;
 
 	old = reservation_object_get_list(obj);
 
 	if (old && old->shared_max) {
-		if (old->shared_count < old->shared_max) {
-			/* perform an in-place update */
-			kfree(obj->staged);
-			obj->staged = NULL;
+		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		} else
-			max = old->shared_max * 2;
-	} else
-		max = 4;
-
-	/*
-	 * resize obj->staged or allocate if it doesn't exist,
-	 * noop if already correct size
-	 */
-	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
-			GFP_KERNEL);
-	if (!fobj)
-		return -ENOMEM;
-
-	obj->staged = fobj;
-	fobj->shared_max = max;
-	return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-static void
-reservation_object_add_shared_inplace(struct reservation_object *obj,
-				      struct reservation_object_list *fobj,
-				      struct dma_fence *fence)
-{
-	struct dma_fence *signaled = NULL;
-	u32 i, signaled_idx;
-
-	dma_fence_get(fence);
-
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-
-	for (i = 0; i < fobj->shared_count; ++i) {
-		struct dma_fence *old_fence;
-
-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj));
-
-		if (old_fence->context == fence->context) {
-			/* memory barrier is added by write_seqcount_begin */
-			RCU_INIT_POINTER(fobj->shared[i], fence);
-			write_seqcount_end(&obj->seq);
-			preempt_enable();
-
-			dma_fence_put(old_fence);
-			return;
-		}
-
-		if (!signaled && dma_fence_is_signaled(old_fence)) {
-			signaled = old_fence;
-			signaled_idx = i;
-		}
-	}
-
-	/*
-	 * memory barrier is added by write_seqcount_begin,
-	 * fobj->shared_count is protected by this lock too
-	 */
-	if (signaled) {
-		RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+		else
+			max = max(old->shared_count + num_fences,
+				  old->shared_max * 2);
 	} else {
-		BUG_ON(fobj->shared_count >= fobj->shared_max);
-		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-		fobj->shared_count++;
+		max = 4;
 	}
 
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
-
-	dma_fence_put(signaled);
-}
-
-static void
-reservation_object_add_shared_replace(struct reservation_object *obj,
-				      struct reservation_object_list *old,
-				      struct reservation_object_list *fobj,
-				      struct dma_fence *fence)
-{
-	unsigned i, j, k;
-
-	dma_fence_get(fence);
-
-	if (!old) {
-		RCU_INIT_POINTER(fobj->shared[0], fence);
-		fobj->shared_count = 1;
-		goto done;
-	}
+	new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
 
 	/*
 	 * no need to bump fence refcounts, rcu_read access
@@ -174,46 +95,45 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	 * references from the old struct are carried over to
 	 * the new.
 	 */
-	for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
-		struct dma_fence *check;
+	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+		struct dma_fence *fence;
 
-		check = rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj));
-
-		if (check->context == fence->context ||
-		    dma_fence_is_signaled(check))
-			RCU_INIT_POINTER(fobj->shared[--k], check);
+		fence = rcu_dereference_protected(old->shared[i],
+						  reservation_object_held(obj));
+		if (dma_fence_is_signaled(fence))
+			RCU_INIT_POINTER(new->shared[--k], fence);
 		else
-			RCU_INIT_POINTER(fobj->shared[j++], check);
+			RCU_INIT_POINTER(new->shared[j++], fence);
 	}
-	fobj->shared_count = j;
-	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-	fobj->shared_count++;
+	new->shared_count = j;
+	new->shared_max = max;
 
-done:
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
 	/*
 	 * RCU_INIT_POINTER can be used here,
 	 * seqcount provides the necessary barriers
 	 */
-	RCU_INIT_POINTER(obj->fence, fobj);
+	RCU_INIT_POINTER(obj->fence, new);
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
 
 	if (!old)
-		return;
+		return 0;
 
 	/* Drop the references to the signaled fences */
-	for (i = k; i < fobj->shared_max; ++i) {
-		struct dma_fence *f;
+	for (i = k; i < new->shared_max; ++i) {
+		struct dma_fence *fence;
 
-		f = rcu_dereference_protected(fobj->shared[i],
-					      reservation_object_held(obj));
-		dma_fence_put(f);
+		fence = rcu_dereference_protected(new->shared[i],
+						  reservation_object_held(obj));
+		dma_fence_put(fence);
 	}
 	kfree_rcu(old, rcu);
+
+	return 0;
 }
+EXPORT_SYMBOL(reservation_object_reserve_shared);
 
 /**
  * reservation_object_add_shared_fence - Add a fence to a shared slot
@@ -226,15 +146,39 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct dma_fence *fence)
 {
-	struct reservation_object_list *old, *fobj = obj->staged;
+	struct reservation_object_list *fobj;
+	unsigned int i, count;
 
-	old = reservation_object_get_list(obj);
-	obj->staged = NULL;
+	dma_fence_get(fence);
 
-	if (!fobj)
-		reservation_object_add_shared_inplace(obj, old, fence);
-	else
-		reservation_object_add_shared_replace(obj, old, fobj, fence);
+	fobj = reservation_object_get_list(obj);
+	count = fobj->shared_count;
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
+	for (i = 0; i < count; ++i) {
+		struct dma_fence *old_fence;
+
+		old_fence = rcu_dereference_protected(fobj->shared[i],
+						      reservation_object_held(obj));
+		if (old_fence->context == fence->context ||
+		    dma_fence_is_signaled(old_fence)) {
+			dma_fence_put(old_fence);
+			goto replace;
+		}
+	}
+
+	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	count++;
+
+replace:
+	RCU_INIT_POINTER(fobj->shared[i], fence);
+	/* pointer update must be visible before we extend the shared_count */
+	smp_store_mb(fobj->shared_count, count);
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
@@ -343,9 +287,6 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	kfree(dst->staged);
-	dst->staged = NULL;
-
 	src_list = reservation_object_get_list(dst);
 	old = reservation_object_get_excl(dst);
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index bc6a16a3..ce8d1d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,8 +10,8 @@
 		drm_scatter.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
-		drm_info.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_encoder_slave.o \
+		drm_trace_points.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
 		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
 		drm_framebuffer.o drm_connector.o drm_blend.o \
@@ -32,11 +32,12 @@
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
 		drm_simple_kms_helper.o drm_modeset_helper.o \
-		drm_scdc_helper.o drm_gem_framebuffer_helper.o
+		drm_scdc_helper.o drm_gem_framebuffer_helper.o \
+		drm_atomic_state_helper.o drm_damage_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb78..f76bcb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,7 +53,7 @@
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-	amdgpu_gmc.o amdgpu_xgmi.o
+	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@
 # add GFX block
 amdgpu-y += \
 	amdgpu_gfx.o \
+	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b0fc116..bcef6ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,11 +75,14 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
 
 #define MAX_GPU_INSTANCE		16
 
@@ -161,6 +164,7 @@ extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
+#define AMDGPU_VM_MAX_NUM_CTX			4096
 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
@@ -360,123 +364,6 @@ int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
 /*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
-	AMDGPU_DOORBELL_KIQ                     = 0x000,
-	AMDGPU_DOORBELL_HIQ                     = 0x001,
-	AMDGPU_DOORBELL_DIQ                     = 0x002,
-	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
-	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
-	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
-	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
-	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
-	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
-	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
-	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
-	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
-	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
-	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
-	AMDGPU_DOORBELL_IH                      = 0x1E8,
-	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
-	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
-	/* doorbell mmio */
-	resource_size_t		base;
-	resource_size_t		size;
-	u32 __iomem		*ptr;
-	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
-	/*
-	 * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
-	 * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
-	 *  Compute related doorbells are allocated from 0x00 to 0x8a
-	 */
-
-
-	/* kernel scheduling */
-	AMDGPU_DOORBELL64_KIQ                     = 0x00,
-
-	/* HSA interface queue and debug queue */
-	AMDGPU_DOORBELL64_HIQ                     = 0x01,
-	AMDGPU_DOORBELL64_DIQ                     = 0x02,
-
-	/* Compute engines */
-	AMDGPU_DOORBELL64_MEC_RING0               = 0x03,
-	AMDGPU_DOORBELL64_MEC_RING1               = 0x04,
-	AMDGPU_DOORBELL64_MEC_RING2               = 0x05,
-	AMDGPU_DOORBELL64_MEC_RING3               = 0x06,
-	AMDGPU_DOORBELL64_MEC_RING4               = 0x07,
-	AMDGPU_DOORBELL64_MEC_RING5               = 0x08,
-	AMDGPU_DOORBELL64_MEC_RING6               = 0x09,
-	AMDGPU_DOORBELL64_MEC_RING7               = 0x0a,
-
-	/* User queue doorbell range (128 doorbells) */
-	AMDGPU_DOORBELL64_USERQUEUE_START         = 0x0b,
-	AMDGPU_DOORBELL64_USERQUEUE_END           = 0x8a,
-
-	/* Graphics engine */
-	AMDGPU_DOORBELL64_GFX_RING0               = 0x8b,
-
-	/*
-	 * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
-	 * Graphics voltage island aperture 1
-	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
-	 */
-
-	/* sDMA engines  reserved from 0xe0 -oxef  */
-	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xE0,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xE1,
-	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xE8,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xE9,
-
-	/* For vega10 sriov, the sdma doorbell must be fixed as follow
-	 * to keep the same setting with host driver, or it will
-	 * happen conflicts
-	 */
-	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0            = 0xF0,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1            = 0xF2,
-	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,
-
-	/* Interrupt handler */
-	AMDGPU_DOORBELL64_IH                      = 0xF4,  /* For legacy interrupt ring buffer */
-	AMDGPU_DOORBELL64_IH_RING1                = 0xF5,  /* For page migration request log */
-	AMDGPU_DOORBELL64_IH_RING2                = 0xF6,  /* For page migration translation/invalidation log */
-
-	/* VCN engine use 32 bits doorbell  */
-	AMDGPU_DOORBELL64_VCN0_1                  = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
-	AMDGPU_DOORBELL64_VCN2_3                  = 0xF9,
-	AMDGPU_DOORBELL64_VCN4_5                  = 0xFA,
-	AMDGPU_DOORBELL64_VCN6_7                  = 0xFB,
-
-	/* overlap the doorbell assignment with VCN as they are  mutually exclusive
-	 * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
-	 */
-	AMDGPU_DOORBELL64_UVD_RING0_1             = 0xF8,
-	AMDGPU_DOORBELL64_UVD_RING2_3             = 0xF9,
-	AMDGPU_DOORBELL64_UVD_RING4_5             = 0xFA,
-	AMDGPU_DOORBELL64_UVD_RING6_7             = 0xFB,
-
-	AMDGPU_DOORBELL64_VCE_RING0_1             = 0xFC,
-	AMDGPU_DOORBELL64_VCE_RING2_3             = 0xFD,
-	AMDGPU_DOORBELL64_VCE_RING4_5             = 0xFE,
-	AMDGPU_DOORBELL64_VCE_RING6_7             = 0xFF,
-
-	AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
-	AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
  * IRQS.
  */
 
@@ -653,6 +540,8 @@ struct amdgpu_asic_funcs {
 			       struct amdgpu_ring *ring);
 	/* check if the asic needs a full reset or if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
+	/* initialize doorbell layout for a specific asic */
+	void (*init_doorbell_index)(struct amdgpu_device *adev);
 };
 
 /*
@@ -831,7 +720,6 @@ struct amdgpu_device {
 	bool				need_dma32;
 	bool				need_swiotlb;
 	bool				accel_working;
-	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
@@ -976,6 +864,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
+	/* KFD */
+	struct amdgpu_kfd_dev		kfd;
+
 	/* display related functionality */
 	struct amdgpu_display_manager dm;
 
@@ -989,9 +880,6 @@ struct amdgpu_device {
 	atomic64_t visible_pin_size;
 	atomic64_t gart_pin_size;
 
-	/* amdkfd interface */
-	struct kfd_dev          *kfd;
-
 	/* soc15 register offset based on ip, instance and  segment */
 	uint32_t 		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
@@ -1023,6 +911,10 @@ struct amdgpu_device {
 	unsigned long last_mm_index;
 	bool                            in_gpu_reset;
 	struct mutex  lock_reset;
+	struct amdgpu_doorbell_index doorbell_index;
+
+	int asic_reset_res;
+	struct work_struct		xgmi_reset_work;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
@@ -1113,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
 
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
 
@@ -1159,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
 
 /* Common functions */
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
 /*
  * functions used by amdgpu_encoder.c
  */
@@ -1252,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 						u8 perf_req, bool advertise);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+		struct amdgpu_dm_backlight_caps *caps);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
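
The amdgpu_acpi_get_backlight_caps() declaration added above sits in the CONFIG_ACPI branch; the !CONFIG_ACPI stubs presumably need a matching no-op so callers build either way. A hedged sketch in the style of the neighboring stubs:

	static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
			struct amdgpu_dm_backlight_caps *caps)
	{
		/* no ACPI: report that no usable caps were found */
		caps->caps_valid = false;
	}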
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc5..4376b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
 };
 
 struct amdgpu_atif_notifications {
-	bool display_switch;
-	bool expansion_mode_change;
 	bool thermal_state;
 	bool forced_power_state;
 	bool system_power_state;
-	bool display_conf_change;
-	bool px_gfx_switch;
 	bool brightness_change;
 	bool dgpu_display_event;
+	bool gpu_package_power_limit;
 };
 
 struct amdgpu_atif_functions {
 	bool system_params;
 	bool sbios_requests;
-	bool select_active_disp;
-	bool lid_state;
-	bool get_tv_standard;
-	bool set_tv_standard;
-	bool get_panel_expansion_mode;
-	bool set_panel_expansion_mode;
 	bool temperature_change;
-	bool graphics_device_types;
+	bool query_backlight_transfer_characteristics;
+	bool ready_to_undock;
+	bool external_gpu_information;
 };
 
 struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
 	struct amdgpu_atif_functions functions;
 	struct amdgpu_atif_notification_cfg notification_cfg;
 	struct amdgpu_encoder *encoder_for_bl;
+	struct amdgpu_dm_backlight_caps backlight_caps;
 };
 
 /* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
  */
 static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
 {
-	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
-	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
 	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
 	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
 	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
-	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
-	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
 	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
 	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+	n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
 }
 
 /**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
 {
 	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
 	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
-	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
-	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
-	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
-	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
-	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
-	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
 	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
-	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+	f->query_backlight_transfer_characteristics =
+		mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+	f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+	f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
 }
 
 /**
@@ -311,6 +299,65 @@ static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 }
 
 /**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @atif: ACPI ATIF handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * backlight_caps.caps_valid will be set to true if the query succeeds
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_qbtc_output characteristics;
+	struct atif_qbtc_arguments arguments;
+	struct acpi_buffer params;
+	size_t size;
+	int err = 0;
+
+	arguments.size = sizeof(arguments);
+	arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+	params.length = sizeof(arguments);
+	params.pointer = (void *)&arguments;
+
+	info = amdgpu_atif_call(atif,
+		ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+		&params);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&characteristics, 0, sizeof(characteristics));
+	size = min(sizeof(characteristics), size);
+	memcpy(&characteristics, info->buffer.pointer, size);
+
+	atif->backlight_caps.caps_valid = true;
+	atif->backlight_caps.min_input_signal =
+			characteristics.min_input_signal;
+	atif->backlight_caps.max_input_signal =
+			characteristics.max_input_signal;
+out:
+	kfree(info);
+	return err;
+}
+
+/**
  * amdgpu_atif_get_sbios_requests - get requested sbios event
  *
  * @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 		}
 	}
 
+	if (atif->functions.query_backlight_transfer_characteristics) {
+		ret = amdgpu_atif_query_backlight_caps(atif);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+					ret);
+			atif->backlight_caps.caps_valid = false;
+		}
+	} else {
+		atif->backlight_caps.caps_valid = false;
+	}
+
 out:
 	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
 	register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 	return ret;
 }
 
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+		struct amdgpu_dm_backlight_caps *caps)
+{
+	if (!adev->atif) {
+		caps->caps_valid = false;
+		return;
+	}
+	caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+	caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+	caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
 /**
  * amdgpu_acpi_fini - tear down driver acpi support
  *
@@ -816,6 +886,5 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
 	unregister_acpi_notifier(&adev->acpi_nb);
-	if (adev->atif)
-		kfree(adev->atif);
+	kfree(adev->atif);
 }
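
To illustrate how a consumer might use the caps exported by amdgpu_acpi_get_backlight_caps(), here is a hypothetical helper (name and linear scaling are assumptions, not part of this patch) that maps a 0-255 user brightness level onto the panel's accepted input-signal range:

	/* Hypothetical: rescale a 0-255 user level into the ATIF-reported
	 * [min_input_signal, max_input_signal] window; pass the raw value
	 * through when the query failed and caps_valid is false.
	 */
	static u32 map_user_brightness(const struct amdgpu_dm_backlight_caps *caps,
				       u32 user_level)
	{
		if (!caps->caps_valid)
			return user_level;

		return caps->min_input_signal + user_level *
			(caps->max_input_signal - caps->min_input_signal) / 255;
	}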
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1580ec6..2dfaf15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include <linux/module.h>
+#include <linux/dma-buf.h>
 
 const struct kgd2kfd_calls *kgd2kfd;
 
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
+/* Total memory size across system memory and all GPU VRAM. Used to
+ * estimate the worst-case amount of memory to reserve for page tables.
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
 int amdgpu_amdkfd_init(void)
 {
+	struct sysinfo si;
 	int ret;
 
+	si_meminfo(&si);
+	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
 #ifdef CONFIG_HSA_AMD
 	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
 	if (ret)
@@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		return;
 	}
 
-	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
-				   adev->pdev, kfd2kgd);
+	adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+				       adev->pdev, kfd2kgd);
+
+	if (adev->kfd.dev)
+		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 }
 
 /**
@@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
 	int i, n;
 	int last_valid_bit;
-	if (adev->kfd) {
+
+	if (adev->kfd.dev) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = compute_vmid_bitmap,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -144,7 +161,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				  KGD_MAX_QUEUES);
 
 		/* remove the KIQ bit as well */
-		if (adev->gfx.kiq.ring.ready)
+		if (adev->gfx.kiq.ring.sched.ready)
 			clear_bit(amdgpu_gfx_queue_to_bit(adev,
 							  adev->gfx.kiq.ring.me - 1,
 							  adev->gfx.kiq.ring.pipe,
@@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				&gpu_resources.doorbell_start_offset);
 
 		if (adev->asic_type < CHIP_VEGA10) {
-			kgd2kfd->device_init(adev->kfd, &gpu_resources);
+			kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
 			return;
 		}
 
@@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			 * process in case of 64-bit doorbells so we
 			 * can use each doorbell assignment twice.
 			 */
-			if (adev->asic_type == CHIP_VEGA10) {
-				gpu_resources.sdma_doorbell[0][i] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
-				gpu_resources.sdma_doorbell[0][i+1] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i+1] =
-					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
-			} else {
-				gpu_resources.sdma_doorbell[0][i] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
-				gpu_resources.sdma_doorbell[0][i+1] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
-				gpu_resources.sdma_doorbell[1][i+1] =
-					AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
-			}
+			gpu_resources.sdma_doorbell[0][i] =
+				adev->doorbell_index.sdma_engine0 + (i >> 1);
+			gpu_resources.sdma_doorbell[0][i+1] =
+				adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+			gpu_resources.sdma_doorbell[1][i] =
+				adev->doorbell_index.sdma_engine1 + (i >> 1);
+			gpu_resources.sdma_doorbell[1][i+1] =
+				adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
 		}
 		/* Doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved for
 		 * SDMA, IH and VCN. So don't use them for the CP.
@@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		gpu_resources.reserved_doorbell_mask = 0x1e0;
 		gpu_resources.reserved_doorbell_val  = 0x0e0;
 
-		kgd2kfd->device_init(adev->kfd, &gpu_resources);
+		kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
 	}
 }
 
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
 {
-	if (adev->kfd) {
-		kgd2kfd->device_exit(adev->kfd);
-		adev->kfd = NULL;
+	if (adev->kfd.dev) {
+		kgd2kfd->device_exit(adev->kfd.dev);
+		adev->kfd.dev = NULL;
 	}
 }
 
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		const void *ih_ring_entry)
 {
-	if (adev->kfd)
-		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+	if (adev->kfd.dev)
+		kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
 {
-	if (adev->kfd)
-		kgd2kfd->suspend(adev->kfd);
+	if (adev->kfd.dev)
+		kgd2kfd->suspend(adev->kfd.dev);
 }
 
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
 {
 	int r = 0;
 
-	if (adev->kfd)
-		r = kgd2kfd->resume(adev->kfd);
+	if (adev->kfd.dev)
+		r = kgd2kfd->resume(adev->kfd.dev);
 
 	return r;
 }
@@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
 {
 	int r = 0;
 
-	if (adev->kfd)
-		r = kgd2kfd->pre_reset(adev->kfd);
+	if (adev->kfd.dev)
+		r = kgd2kfd->pre_reset(adev->kfd.dev);
 
 	return r;
 }
@@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 {
 	int r = 0;
 
-	if (adev->kfd)
-		r = kgd2kfd->post_reset(adev->kfd);
+	if (adev->kfd.dev)
+		r = kgd2kfd->post_reset(adev->kfd.dev);
 
 	return r;
 }
@@ -268,9 +274,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 		amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-			void **mem_obj, uint64_t *gpu_addr,
-			void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
@@ -340,7 +346,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	return r;
 }
 
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -351,8 +357,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 	amdgpu_bo_unref(&(bo));
 }
 
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +389,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
 		mem_info->mem_clk_max = 100;
 }
 
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -392,7 +398,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 	return 0;
 }
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -405,7 +411,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 		return 100;
 }
 
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
@@ -428,6 +434,62 @@ void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 	cu_info->lds_size = acu_info.lds_size;
 }
 
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+				  struct kgd_dev **dma_buf_kgd,
+				  uint64_t *bo_size, void *metadata_buffer,
+				  size_t buffer_size, uint32_t *metadata_size,
+				  uint32_t *flags)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	uint64_t metadata_flags;
+	int r = -EINVAL;
+
+	dma_buf = dma_buf_get(dma_buf_fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	if (dma_buf->ops != &amdgpu_dmabuf_ops)
+		/* Can't handle non-graphics buffers */
+		goto out_put;
+
+	obj = dma_buf->priv;
+	if (obj->dev->driver != adev->ddev->driver)
+		/* Can't handle buffers from different drivers */
+		goto out_put;
+
+	adev = obj->dev->dev_private;
+	bo = gem_to_amdgpu_bo(obj);
+	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+				    AMDGPU_GEM_DOMAIN_GTT)))
+		/* Only VRAM and GTT BOs are supported */
+		goto out_put;
+
+	r = 0;
+	if (dma_buf_kgd)
+		*dma_buf_kgd = (struct kgd_dev *)adev;
+	if (bo_size)
+		*bo_size = amdgpu_bo_size(bo);
+	if (metadata_size)
+		*metadata_size = bo->metadata_size;
+	if (metadata_buffer)
+		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+					   metadata_size, &metadata_flags);
+	if (flags) {
+		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+			ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
+	}
+
+out_put:
+	dma_buf_put(dma_buf);
+	return r;
+}
+
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -510,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
-	if (adev->kfd) {
+	if (adev->kfd.dev) {
 		if ((1 << vmid) & compute_vmid_bitmap)
 			return true;
 	}
@@ -524,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
 	return false;
 }
 
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
 }
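
A hedged caller sketch for the new dma-buf query (kgd and dmabuf_fd are assumed to be in scope): all out-parameters are optional, so a caller interested only in size, owner and placement can pass NULL for the metadata arguments:

	uint64_t size;
	uint32_t flags;
	struct kgd_dev *owner;
	int r;

	/* NULL metadata buffer and zero metadata size: unused here */
	r = amdgpu_amdkfd_get_dmabuf_info(kgd, dmabuf_fd, &owner, &size,
					  NULL, 0, NULL, &flags);
	if (!r)
		pr_debug("fd %d: %llu bytes, %s BO\n", dmabuf_fd, size,
			 (flags & ALLOC_MEM_FLAGS_VRAM) ? "VRAM" : "GTT");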
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7..70429f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/mmu_context.h>
 #include <linux/workqueue.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
 #include "amdgpu_vm.h"
 
 extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
 
 struct amdgpu_device;
 
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
 	char timeline_name[TASK_COMM_LEN];
 };
 
+struct amdgpu_kfd_dev {
+	struct kfd_dev *dev;
+	uint64_t vram_used;
+};
+
 struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
 						       struct mm_struct *mm);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -134,16 +139,21 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
 /* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-			void **mem_obj, uint64_t *gpu_addr,
-			void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+				  struct kgd_dev **dmabuf_kgd,
+				  uint64_t *bo_size, void *metadata_buffer,
+				  size_t buffer_size, uint32_t *metadata_size,
+				  uint32_t *flags);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					      struct kfd_vm_fault_info *info);
 
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+				      struct dma_buf *dmabuf,
+				      uint64_t va, void *vm,
+				      struct kgd_mem **mem, uint64_t *size,
+				      uint64_t *mmap_offset);
+
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
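
A minimal usage sketch for the renamed GTT helpers declared above (kgd and the surrounding context are assumptions): allocate one page of CPU-visible GTT memory, touch it, and free it:

	void *mem_obj, *cpu_ptr;
	uint64_t gpu_addr;

	if (!amdgpu_amdkfd_alloc_gtt_mem(kgd, PAGE_SIZE, &mem_obj,
					 &gpu_addr, &cpu_ptr, false)) {
		/* cpu_ptr is a valid kernel mapping, gpu_addr the GPU VA */
		memset(cpu_ptr, 0, PAGE_SIZE);
		amdgpu_amdkfd_free_gtt_mem(kgd, mem_obj);
	}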
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d983..ff7fac7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
@@ -173,13 +174,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -200,28 +194,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f14991..56ea929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
@@ -128,13 +129,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -157,27 +151,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4..5c51d491 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
@@ -46,38 +47,9 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
 
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ				0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX		0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK				0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX		0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32		0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32		0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32		0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32		0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32		0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32		0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32		0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX	0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32		0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX	0
 
 #define V9_PIPE_PER_MEC		(4)
 #define V9_QUEUES_PER_PIPE_MEC	(8)
@@ -167,13 +139,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -196,26 +161,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = amdgpu_amdkfd_get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
 
@@ -785,15 +733,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	uint32_t req = (1 << vmid) |
-		(0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
-	mutex_lock(&adev->srbm_mutex);
 
 	/* Use legacy mode tlb invalidation.
 	 *
@@ -810,34 +749,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 	 * TODO 2: support range-based invalidation, requires kfd2kgd
 	 * interface change
 	 */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-				0xffffffff);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-				0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-				0xffffffff);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-				0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
-				req);
-
-	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
-					(1 << vmid)))
-		cpu_relax();
-
-	while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
-					mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
-					(1 << vmid)))
-		cpu_relax();
-
-	mutex_unlock(&adev->srbm_mutex);
-
+	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +788,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 	if (adev->in_gpu_reset)
 		return -EIO;
 
-	if (ring->ready)
+	if (ring->sched.ready)
 		return invalidate_tlbs_with_kiq(adev, pasid);
 
 	for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +928,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint64_t base = page_table_base | AMDGPU_PTE_VALID;
 
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +939,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 	 * now, all processes share the same address space size, like
 	 * on GFX8 and older.
 	 */
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
+	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-			lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-			upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-			lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-			upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059..be1ab43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
 #include <drm/drmP.h>
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
 /* Impose limit on how much memory KFD can use */
 static struct {
 	uint64_t max_system_mem_limit;
-	uint64_t max_userptr_mem_limit;
+	uint64_t max_ttm_mem_limit;
 	int64_t system_mem_used;
-	int64_t userptr_mem_used;
+	int64_t ttm_mem_used;
 	spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
 }
 
 /* Set memory usage limits. Currently, the limits are
- *  System (kernel) memory - 3/8th System RAM
- *  Userptr memory - 3/4th System RAM
+ *  System (TTM + userptr) memory - 3/4th System RAM
+ *  TTM memory - 3/8th System RAM
  */
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 {
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 	mem *= si.mem_unit;
 
 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
-	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
-	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
-	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
 		(kfd_mem_limit.max_system_mem_limit >> 20),
-		(kfd_mem_limit.max_userptr_mem_limit >> 20));
+		(kfd_mem_limit.max_ttm_mem_limit >> 20));
 }
 
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
-					      uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+		uint64_t size, u32 domain, bool sg)
 {
-	size_t acc_size;
+	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
 	int ret = 0;
 
 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 				       sizeof(struct amdgpu_bo));
 
-	spin_lock(&kfd_mem_limit.mem_limit_lock);
+	vram_needed = 0;
 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		if (kfd_mem_limit.system_mem_used + (acc_size + size) >
-			kfd_mem_limit.max_system_mem_limit) {
-			ret = -ENOMEM;
-			goto err_no_mem;
-		}
-		kfd_mem_limit.system_mem_used += (acc_size + size);
-	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
-		if ((kfd_mem_limit.system_mem_used + acc_size >
-			kfd_mem_limit.max_system_mem_limit) ||
-			(kfd_mem_limit.userptr_mem_used + (size + acc_size) >
-			kfd_mem_limit.max_userptr_mem_limit)) {
-			ret = -ENOMEM;
-			goto err_no_mem;
-		}
-		kfd_mem_limit.system_mem_used += acc_size;
-		kfd_mem_limit.userptr_mem_used += size;
+		/* TTM GTT memory */
+		system_mem_needed = acc_size + size;
+		ttm_mem_needed = acc_size + size;
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+		/* Userptr */
+		system_mem_needed = acc_size + size;
+		ttm_mem_needed = acc_size;
+	} else {
+		/* VRAM and SG */
+		system_mem_needed = acc_size;
+		ttm_mem_needed = acc_size;
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+			vram_needed = size;
 	}
-err_no_mem:
+
+	spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+	     kfd_mem_limit.max_system_mem_limit) ||
+	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+	     kfd_mem_limit.max_ttm_mem_limit) ||
+	    (adev->kfd.vram_used + vram_needed >
+	     adev->gmc.real_vram_size - reserved_for_pt)) {
+		ret = -ENOMEM;
+	} else {
+		kfd_mem_limit.system_mem_used += system_mem_needed;
+		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+		adev->kfd.vram_used += vram_needed;
+	}
+
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 	return ret;
 }
 
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
-				       uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+		uint64_t size, u32 domain, bool sg)
 {
 	size_t acc_size;
 
@@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		kfd_mem_limit.system_mem_used -= (acc_size + size);
-	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+		kfd_mem_limit.system_mem_used -= (acc_size + size);
+		kfd_mem_limit.ttm_mem_used -= acc_size;
+	} else {
 		kfd_mem_limit.system_mem_used -= acc_size;
-		kfd_mem_limit.userptr_mem_used -= size;
+		kfd_mem_limit.ttm_mem_used -= acc_size;
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+			adev->kfd.vram_used -= size;
+			WARN_ONCE(adev->kfd.vram_used < 0,
+				  "kfd VRAM memory accounting unbalanced");
+		}
 	}
 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 		  "kfd system memory accounting unbalanced");
-	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
-		  "kfd userptr memory accounting unbalanced");
+	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+		  "kfd TTM memory accounting unbalanced");
 
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
 
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
-	spin_lock(&kfd_mem_limit.mem_limit_lock);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	u32 domain = bo->preferred_domains;
+	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
 
 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
-		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
-		kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
-	} else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
-		kfd_mem_limit.system_mem_used -=
-			(bo->tbo.acc_size + amdgpu_bo_size(bo));
+		domain = AMDGPU_GEM_DOMAIN_CPU;
+		sg = false;
 	}
-	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
-		  "kfd system memory accounting unbalanced");
-	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
-		  "kfd userptr memory accounting unbalanced");
 
-	spin_unlock(&kfd_mem_limit.mem_limit_lock);
+	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
 }
 
 
@@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 	return 0;
 }
 
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-			 struct dma_fence *f)
-{
-	int ret = amdgpu_sync_fence(adev, sync, f, false);
-
-	/* Sync objects can't handle multiple GPUs (contexts) updating
-	 * sync->last_vm_update. Fortunately we don't need it for
-	 * KFD's purposes, so we can just drop that fence.
-	 */
-	if (sync->last_vm_update) {
-		dma_fence_put(sync->last_vm_update);
-		sync->last_vm_update = NULL;
-	}
-
-	return ret;
-}
-
 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 	if (ret)
 		return ret;
 
-	return sync_vm_fence(adev, sync, vm->last_update);
+	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
 }
 
 /* add_bo_to_vm - Add a BO to a VM
@@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
 	struct amdgpu_bo *bo = mem->bo;
 
 	INIT_LIST_HEAD(&entry->head);
-	entry->shared = true;
+	entry->num_shared = 1;
 	entry->bo = &bo->tbo;
 	mutex_lock(&process_info->lock);
 	if (userptr)
@@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
-	ctx->kfd_bo.tv.shared = true;
+	ctx->kfd_bo.tv.num_shared = 1;
 	ctx->kfd_bo.user_pages = NULL;
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
@@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
-	ctx->kfd_bo.tv.shared = true;
+	ctx->kfd_bo.tv.num_shared = 1;
 	ctx->kfd_bo.user_pages = NULL;
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
@@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 	/* Add the eviction fence back */
 	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
 
-	sync_vm_fence(adev, sync, bo_va->last_pt_update);
+	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
 
 	return 0;
 }
@@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 		return ret;
 	}
 
-	return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +887,24 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	return ret;
 }
 
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+	if (!sg)
+		return NULL;
+	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+		kfree(sg);
+		return NULL;
+	}
+	sg->sgl->dma_address = addr;
+	sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+	sg->sgl->dma_length = size;
+#endif
+	return sg;
+}
+
 static int process_validate_vms(struct amdkfd_process_info *process_info)
 {
 	struct amdgpu_vm *peer_vm;
@@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
 	return 0;
 }
 
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+				 struct amdgpu_sync *sync)
+{
+	struct amdgpu_vm *peer_vm;
+	int ret;
+
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			    vm_list_node) {
+		struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+		ret = amdgpu_sync_resv(NULL,
+					sync, pd->tbo.resv,
+					AMDGPU_FENCE_OWNER_UNDEFINED, false);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int process_update_pds(struct amdkfd_process_info *process_info,
 			      struct amdgpu_sync *sync)
 {
@@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+	enum ttm_bo_type bo_type = ttm_bo_type_device;
+	struct sg_table *sg = NULL;
 	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_param bp;
@@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		if (!offset || !*offset)
 			return -EINVAL;
 		user_addr = *offset;
+	} else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+		bo_type = ttm_bo_type_sg;
+		alloc_flags = 0;
+		if (size > UINT_MAX)
+			return -EINVAL;
+		sg = create_doorbell_sg(*offset, size);
+		if (!sg)
+			return -ENOMEM;
 	} else {
 		return -EINVAL;
 	}
 
 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-	if (!*mem)
-		return -ENOMEM;
+	if (!*mem) {
+		ret = -ENOMEM;
+		goto err;
+	}
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
 	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
 			adev->asic_type != CHIP_FIJI &&
 			adev->asic_type != CHIP_POLARIS10 &&
-			adev->asic_type != CHIP_POLARIS11) ?
+			adev->asic_type != CHIP_POLARIS11 &&
+			adev->asic_type != CHIP_POLARIS12) ?
 			VI_BO_SIZE_ALIGN : 1;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 	amdgpu_sync_create(&(*mem)->sync);
 
-	ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
 	if (ret) {
 		pr_debug("Insufficient system memory\n");
-		goto err_reserve_system_mem;
+		goto err_reserve_limit;
 	}
 
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	bp.byte_align = byte_align;
 	bp.domain = alloc_domain;
 	bp.flags = alloc_flags;
-	bp.type = ttm_bo_type_device;
+	bp.type = bo_type;
 	bp.resv = NULL;
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret) {
@@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 				domain_string(alloc_domain), ret);
 		goto err_bo_create;
 	}
+	if (bo_type == ttm_bo_type_sg) {
+		bo->tbo.sg = sg;
+		bo->tbo.ttm->sg = sg;
+	}
 	bo->kfd_bo = *mem;
 	(*mem)->bo = bo;
 	if (user_addr)
@@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 allocate_init_user_pages_failed:
 	amdgpu_bo_unref(&bo);
 	/* Don't unreserve system mem limit twice */
-	goto err_reserve_system_mem;
+	goto err_reserve_limit;
 err_bo_create:
-	unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
 	mutex_destroy(&(*mem)->lock);
 	kfree(*mem);
+err:
+	if (sg) {
+		sg_free_table(sg);
+		kfree(sg);
+	}
 	return ret;
 }
 
@@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);
 
+	/* If the SG is not NULL, it's one we created for a doorbell
+	 * BO. We need to free it.
+	 */
+	if (mem->bo->tbo.sg) {
+		sg_free_table(mem->bo->tbo.sg);
+		kfree(mem->bo->tbo.sg);
+	}
+
 	/* Free the BO*/
 	amdgpu_bo_unref(&mem->bo);
 	mutex_destroy(&mem->lock);
@@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * the queues are still stopped and we can leave mapping for
 	 * the next restore worker
 	 */
-	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
 	if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 	return 0;
 }
 
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+				      struct dma_buf *dma_buf,
+				      uint64_t va, void *vm,
+				      struct kgd_mem **mem, uint64_t *size,
+				      uint64_t *mmap_offset)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+	if (dma_buf->ops != &amdgpu_dmabuf_ops)
+		/* Can't handle non-graphics buffers */
+		return -EINVAL;
+
+	obj = dma_buf->priv;
+	if (obj->dev->dev_private != adev)
+		/* Can't handle buffers from other devices */
+		return -EINVAL;
+
+	bo = gem_to_amdgpu_bo(obj);
+	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+				    AMDGPU_GEM_DOMAIN_GTT)))
+		/* Only VRAM and GTT BOs are supported */
+		return -EINVAL;
+
+	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+	if (!*mem)
+		return -ENOMEM;
+
+	if (size)
+		*size = amdgpu_bo_size(bo);
+
+	if (mmap_offset)
+		*mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	mutex_init(&(*mem)->lock);
+	(*mem)->mapping_flags =
+		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+	(*mem)->bo = amdgpu_bo_ref(bo);
+	(*mem)->va = va;
+	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+	(*mem)->mapped_to_gpu_memory = 0;
+	(*mem)->process_info = avm->process_info;
+	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+	amdgpu_sync_create(&(*mem)->sync);
+
+	return 0;
+}
+
 /* Evict a userptr BO by stopping the queues if necessary
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			    validate_list.head) {
 		list_add_tail(&mem->resv_list.head, &resv_list);
 		mem->resv_list.bo = mem->validate_list.bo;
-		mem->resv_list.shared = mem->validate_list.shared;
+		mem->resv_list.num_shared = mem->validate_list.num_shared;
 	}
 
 	/* Reserve all BOs and page tables for validation */
@@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
 		list_add_tail(&mem->resv_list.head, &ctx.list);
 		mem->resv_list.bo = mem->validate_list.bo;
-		mem->resv_list.shared = mem->validate_list.shared;
+		mem->resv_list.num_shared = mem->validate_list.num_shared;
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	if (ret)
 		goto validate_map_fail;
 
-	/* Wait for PD/PTs validate to finish */
-	/* FIXME: I think this isn't needed */
-	list_for_each_entry(peer_vm, &process_info->vm_list_head,
-			    vm_list_node) {
-		struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
-		ttm_bo_wait(&bo->tbo, false, false);
+	ret = process_sync_pds_resv(process_info, &sync_obj);
+	if (ret) {
+		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+		goto validate_map_fail;
 	}
 
 	/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
 			goto validate_map_fail;
 		}
-
+		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+		if (ret) {
+			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+			goto validate_map_fail;
+		}
 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
 				    bo_list) {
 			ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 		goto validate_map_fail;
 	}
 
+	/* Wait for validate and PT updates to finish */
 	amdgpu_sync_wait(&sync_obj, false);
 
 	/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	process_info->eviction_fence = new_fence;
 	*ef = dma_fence_get(&new_fence->base);
 
-	/* Wait for validate to finish and attach new eviction fence */
-	list_for_each_entry(mem, &process_info->kfd_bo_list,
-		validate_list.head)
-		ttm_bo_wait(&mem->bo->tbo, false, false);
+	/* Attach new eviction fence to all BOs */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
 		validate_list.head)
 		amdgpu_bo_fence(mem->bo,
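
The SG path added above exists because doorbell pages have no struct page
backing and so cannot go through the normal userptr or VRAM allocation paths.
A minimal sketch of building the one-entry sg_table such a BO consumes,
assuming the doorbell's DMA address and size are already known (the helper
name is illustrative, not the exact upstream one):

	static struct sg_table *doorbell_sg_sketch(uint64_t dma_addr, uint32_t size)
	{
		struct sg_table *sg = kzalloc(sizeof(*sg), GFP_KERNEL);

		if (!sg)
			return NULL;
		if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
			kfree(sg);
			return NULL;
		}
		/* single entry covering the doorbell page, no struct page */
		sg->sgl->dma_address = dma_addr;
		sg->sgl->length = size;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->sgl->dma_length = size;
	#endif
		return sg;
	}

This is also why the allocation error path and the free path above call
sg_free_table() and kfree() once the table is no longer owned by the BO.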
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982..5c79da8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 		entry->priority = min(info[i].bo_priority,
 				      AMDGPU_BO_LIST_MAX_PRIORITY);
 		entry->tv.bo = &bo->tbo;
-		entry->tv.shared = !bo->prime_shared_count;
 
 		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
 			list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0acc8de..cf4e190 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &bo->tbo;
-	p->uf_entry.tv.shared = true;
+	/* One for TTM and one for the CS job */
+	p->uf_entry.tv.num_shared = 2;
 	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_put_unlocked(gobj);
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			return r;
 	}
 
+	/* One for TTM and one for the CS job */
+	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+		e->tv.num_shared = 2;
+
 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 	if (p->bo_list->first_userptr != p->bo_list->num_entries)
 		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	gws = p->bo_list->gws_obj;
 	oa = p->bo_list->oa_obj;
 
-	amdgpu_bo_list_for_each_entry(e, p->bo_list)
-		e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+		/* Make sure we use the exclusive slot for shared BOs */
+		if (bo->prime_shared_count)
+			e->tv.num_shared = 0;
+		e->bo_va = amdgpu_vm_bo_find(vm, bo);
+	}
 
 	if (gds) {
 		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
-	if (r)
-		return r;
-
 	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
 
 	if (amdgpu_vm_debug) {
@@ -1104,7 +1111,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 {
 	int r;
 	struct dma_fence *fence;
-	r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
 	if (r)
 		return r;
 
@@ -1193,7 +1200,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 	int i;
 
 	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
@@ -1260,8 +1267,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_abort:
-	dma_fence_put(&job->base.s_fence->finished);
-	job->base.s_fence = NULL;
+	drm_sched_job_cleanup(&job->base);
 	amdgpu_mn_unlock(p->mn);
 
 error_unlock:
@@ -1285,7 +1291,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
+		DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
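
Across the hunks above, the boolean tv.shared becomes the counter
tv.num_shared: callers now tell ttm_eu_reserve_buffers() how many shared
fence slots to preallocate while the reservation is held, and 0 forces use
of the exclusive slot (as done for prime-shared BOs). A minimal sketch of
the new convention, with all names local to the example:

	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	int r;

	INIT_LIST_HEAD(&list);
	tv.bo = &bo->tbo;
	tv.num_shared = 2;	/* one slot for TTM, one for the CS job */
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (!r) {
		/* ... validate, submit, fence the BOs ... */
		ttm_eu_backoff_reservation(&ticket, &list);
	}

This is also why the explicit reservation_object_reserve_shared() call
could be dropped from amdgpu_cs_vm_handling() above.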
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 0000000..7e22be7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+	addr = amdgpu_gmc_sign_extend(addr);
+
+	return addr;
+}
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size)
+{
+	int r;
+	void *ptr;
+
+	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				domain, bo,
+				NULL, &ptr);
+	if (!*bo)
+		return -ENOMEM;
+
+	memset(ptr, 0, size);
+	return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+	amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init.
+ * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each GFX
+ * command submission should use this virtual address within the META_DATA
+ * init package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size)
+{
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	struct amdgpu_bo_list_entry pd;
+	struct ttm_validate_buffer csa_tv;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&csa_tv.head);
+	csa_tv.bo = &bo->tbo;
+	csa_tv.num_shared = 1;
+
+	list_add(&csa_tv.head, &list);
+	amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r) {
+		DRM_ERROR("failed to reserve CSA and PD BOs: err=%d\n", r);
+		return r;
+	}
+
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+	if (!*bo_va) {
+		ttm_eu_backoff_reservation(&ticket, &list);
+		DRM_ERROR("failed to create bo_va for static CSA\n");
+		return -ENOMEM;
+	}
+
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+				size);
+	if (r) {
+		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+			     AMDGPU_PTE_EXECUTABLE);
+
+	if (r) {
+		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+	return 0;
+}
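
For context, a sketch of how the relocated CSA helpers are meant to be used
from the per-file open path under SRIOV; fpriv and the error label are
assumptions of this example, and the AMDGPU_GMC_HOLE_MASK masking follows
the driver's usual handling of the high VA hole:

	if (amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr,
					  AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}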
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 0000000..524b443
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE		(128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 95f4c41..d85184b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -248,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
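
Note that idr_alloc() treats its end argument as exclusive, so the hunk
above bounds context handles to the range [1, AMDGPU_VM_MAX_NUM_CTX) rather
than the previously unlimited range that end = 0 requested.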
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345..b60afea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_pm.h"
 
+#include "amdgpu_xgmi.h"
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
  */
 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 {
+
 	/* No doorbell on SI hardware generation */
 	if (adev->asic_type < CHIP_BONAIRE) {
 		adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
 		return -EINVAL;
 
+	amdgpu_asic_init_doorbell_index(adev);
+
 	/* doorbell bar mapping */
 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 
 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
-					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+					     adev->doorbell_index.max_assignment+1);
 	if (adev->doorbell.num_doorbells == 0)
 		return -EINVAL;
 
+	/* For Vega, reserve and map two pages on the doorbell BAR since the
+	 * SDMA paging queue doorbell uses the second page. The
+	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+	 * doorbells are in the first page, so with the paging queue enabled
+	 * the max num_doorbells grows by one page (0x400 in dwords).
+	 */
+	if (adev->asic_type >= CHIP_VEGA10)
+		adev->doorbell.num_doorbells += 0x400;
+
 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 				     adev->doorbell.num_doorbells *
 				     sizeof(u32));
@@ -1656,7 +1670,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 			/* right after GMC hw init, we create CSA */
 			if (amdgpu_sriov_vf(adev)) {
-				r = amdgpu_allocate_static_csa(adev);
+				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+								AMDGPU_GEM_DOMAIN_VRAM,
+								AMDGPU_CSA_SIZE);
 				if (r) {
 					DRM_ERROR("allocate CSA failed %d\n", r);
 					return r;
@@ -1681,7 +1697,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	amdgpu_xgmi_add_device(adev);
+	if (adev->gmc.xgmi.num_physical_nodes > 1)
+		amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 
 	if (amdgpu_sriov_vf(adev))
@@ -1848,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1)
+		amdgpu_xgmi_remove_device(adev);
+
 	amdgpu_amdkfd_device_fini(adev);
 
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -1890,7 +1910,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_ucode_free_bo(adev);
-			amdgpu_free_static_csa(adev);
+			amdgpu_free_static_csa(&adev->virt.csa_obj);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
 		}
@@ -2337,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
 }
 
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+	struct amdgpu_device *adev =
+		container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+	adev->asic_reset_res =  amdgpu_asic_reset(adev);
+	if (adev->asic_reset_res)
+		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
+			 adev->asic_reset_res, adev->ddev->unique);
+}
+
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2435,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
 			  amdgpu_device_delay_enable_gfx_off);
 
+	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
 	adev->gfx.gfx_off_req_count = 1;
 	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
 
@@ -2455,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* doorbell bar mapping */
-	amdgpu_device_doorbell_init(adev);
-
 	/* io port mapping */
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2476,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
+	/* doorbell bar mapping and doorbell index init */
+	amdgpu_device_doorbell_init(adev);
+
 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
@@ -3148,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
-	bool need_full_reset, vram_lost = 0;
-	int r;
-
-	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
-	if (!need_full_reset) {
-		amdgpu_device_ip_pre_soft_reset(adev);
-		r = amdgpu_device_ip_soft_reset(adev);
-		amdgpu_device_ip_post_soft_reset(adev);
-		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
-			DRM_INFO("soft reset failed, will fallback to full reset!\n");
-			need_full_reset = true;
-		}
-	}
-
-	if (need_full_reset) {
-		r = amdgpu_device_ip_suspend(adev);
-
-retry:
-		r = amdgpu_asic_reset(adev);
-		/* post card */
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
-		if (!r) {
-			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-			r = amdgpu_device_ip_resume_phase1(adev);
-			if (r)
-				goto out;
-
-			vram_lost = amdgpu_device_check_vram_lost(adev);
-			if (vram_lost) {
-				DRM_ERROR("VRAM is lost!\n");
-				atomic_inc(&adev->vram_lost_counter);
-			}
-
-			r = amdgpu_gtt_mgr_recover(
-				&adev->mman.bdev.man[TTM_PL_TT]);
-			if (r)
-				goto out;
-
-			r = amdgpu_device_fw_loading(adev);
-			if (r)
-				return r;
-
-			r = amdgpu_device_ip_resume_phase2(adev);
-			if (r)
-				goto out;
-
-			if (vram_lost)
-				amdgpu_device_fill_reset_magic(adev);
-		}
-	}
-
-out:
-	if (!r) {
-		amdgpu_irq_gpu_reset_resume_helper(adev);
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			r = amdgpu_device_ip_suspend(adev);
-			need_full_reset = true;
-			goto retry;
-		}
-	}
-
-	if (!r)
-		r = amdgpu_device_recover_vram(adev);
-
-	return r;
-}
 
 /**
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3295,40 +3250,46 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1  &&
-					 !amdgpu_sriov_vf(adev))) {
-		DRM_INFO("GPU recovery disabled.\n");
-		return false;
+	if (amdgpu_gpu_recovery == 0)
+		goto disabled;
+
+	if (amdgpu_sriov_vf(adev))
+		return true;
+
+	if (amdgpu_gpu_recovery == -1) {
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_TOPAZ:
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+		case CHIP_POLARIS10:
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS12:
+		case CHIP_VEGAM:
+		case CHIP_VEGA20:
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+			break;
+		default:
+			goto disabled;
+		}
 	}
 
 	return true;
+
+disabled:
+		DRM_INFO("GPU recovery disabled.\n");
+		return false;
 }
 
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-			      struct amdgpu_job *job)
+
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+					struct amdgpu_job *job,
+					bool *need_full_reset_arg)
 {
-	int i, r, resched;
-
-	dev_info(adev->dev, "GPU reset begin!\n");
-
-	mutex_lock(&adev->lock_reset);
-	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_gpu_reset = 1;
-
-	/* Block kfd */
-	amdgpu_amdkfd_pre_reset(adev);
-
-	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	int i, r = 0;
+	bool need_full_reset  = *need_full_reset_arg;
 
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3348,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		r = amdgpu_device_reset_sriov(adev, job ? false : true);
-	else
-		r = amdgpu_device_reset(adev);
+
+
+	if (!amdgpu_sriov_vf(adev)) {
+
+		if (!need_full_reset)
+			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+		if (!need_full_reset) {
+			amdgpu_device_ip_pre_soft_reset(adev);
+			r = amdgpu_device_ip_soft_reset(adev);
+			amdgpu_device_ip_post_soft_reset(adev);
+			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+				DRM_INFO("soft reset failed, will fallback to full reset!\n");
+				need_full_reset = true;
+			}
+		}
+
+		if (need_full_reset)
+			r = amdgpu_device_ip_suspend(adev);
+
+		*need_full_reset_arg = need_full_reset;
+	}
+
+	return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+			       struct list_head *device_list_handle,
+			       bool *need_full_reset_arg)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+	int r = 0;
+
+	/*
+	 * ASIC reset has to be done on all XGMI hive nodes ASAP
+	 * to allow proper link negotiation in FW (within 1 sec)
+	 */
+	if (need_full_reset) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			/* For XGMI run all resets in parallel to speed up the process */
+			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+					r = -EALREADY;
+			} else
+				r = amdgpu_asic_reset(tmp_adev);
+
+			if (r) {
+				DRM_ERROR("ASIC reset failed with error %d for drm dev %s",
+					 r, tmp_adev->ddev->unique);
+				break;
+			}
+		}
+
+		/* For XGMI wait for all PSP resets to complete before proceed */
+		if (!r) {
+			list_for_each_entry(tmp_adev, device_list_handle,
+					    gmc.xgmi.head) {
+				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+					flush_work(&tmp_adev->xgmi_reset_work);
+					r = tmp_adev->asic_reset_res;
+					if (r)
+						break;
+				}
+			}
+		}
+	}
+
+
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		if (need_full_reset) {
+			/* post card */
+			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+				DRM_WARN("asic atom init failed!");
+
+			if (!r) {
+				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+				r = amdgpu_device_ip_resume_phase1(tmp_adev);
+				if (r)
+					goto out;
+
+				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+				if (vram_lost) {
+					DRM_ERROR("VRAM is lost!\n");
+					atomic_inc(&tmp_adev->vram_lost_counter);
+				}
+
+				r = amdgpu_gtt_mgr_recover(
+					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
+				if (r)
+					goto out;
+
+				r = amdgpu_device_fw_loading(tmp_adev);
+				if (r)
+					return r;
+
+				r = amdgpu_device_ip_resume_phase2(tmp_adev);
+				if (r)
+					goto out;
+
+				if (vram_lost)
+					amdgpu_device_fill_reset_magic(tmp_adev);
+
+				/* Update PSP FW topology after reset */
+				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+			}
+		}
+
+
+out:
+		if (!r) {
+			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+			r = amdgpu_ib_ring_tests(tmp_adev);
+			if (r) {
+				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+				r = amdgpu_device_ip_suspend(tmp_adev);
+				need_full_reset = true;
+				r = -EAGAIN;
+				goto end;
+			}
+		}
+
+		if (!r)
+			r = amdgpu_device_recover_vram(tmp_adev);
+		else
+			tmp_adev->asic_reset_res = r;
+	}
+
+end:
+	*need_full_reset_arg = need_full_reset;
+	return r;
+}
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+					  struct amdgpu_job *job)
+{
+	int i;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3363,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 * or all rings (in the case @job is NULL)
 		 * after above amdgpu_reset accomplished
 		 */
-		if ((!job || job->base.sched == &ring->sched) && !r)
+		if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
 			drm_sched_job_recovery(&ring->sched);
 
 		kthread_unpark(ring->sched.thread);
@@ -3373,21 +3468,142 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		drm_helper_resume_force_mode(adev->ddev);
 	}
 
-	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+	adev->asic_reset_res = 0;
+}
 
-	if (r) {
-		/* bad news, how to tell it to userspace ? */
-		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
-		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
-	} else {
-		dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
-	}
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+	mutex_lock(&adev->lock_reset);
+	atomic_inc(&adev->gpu_reset_counter);
+	adev->in_gpu_reset = 1;
+	/* Block kfd */
+	amdgpu_amdkfd_pre_reset(adev);
+}
 
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
 	/*unlock kfd */
 	amdgpu_amdkfd_post_reset(adev);
 	amdgpu_vf_error_trans_all(adev);
 	adev->in_gpu_reset = 0;
 	mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job triggered the hang
+ *
+ * Attempt to reset the GPU if it has hung (all ASICs): try a soft reset
+ * first, falling back to a full reset and ASIC reinitialization.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+			      struct amdgpu_job *job)
+{
+	int r;
+	struct amdgpu_hive_info *hive = NULL;
+	bool need_full_reset = false;
+	struct amdgpu_device *tmp_adev = NULL;
+	struct list_head device_list, *device_list_handle =  NULL;
+
+	INIT_LIST_HEAD(&device_list);
+
+	dev_info(adev->dev, "GPU reset begin!\n");
+
+	/*
+	 * In an XGMI hive, disallow concurrent resets triggered by different
+	 * nodes. There is no point anyway, since the node already executing a
+	 * reset will also reset all the other nodes in the hive.
+	 */
+	hive = amdgpu_get_xgmi_hive(adev);
+	if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    !mutex_trylock(&hive->hive_lock))
+		return 0;
+
+	/* Start with this adev's pre-ASIC-reset first, for the soft reset check. */
+	amdgpu_device_lock_adev(adev);
+	r = amdgpu_device_pre_asic_reset(adev,
+					 job,
+					 &need_full_reset);
+	if (r) {
+		/*TODO Should we stop ?*/
+		DRM_ERROR("GPU pre-ASIC-reset failed with error %d for drm dev %s",
+			  r, adev->ddev->unique);
+		adev->asic_reset_res = r;
+	}
+
+	/* Build list of devices to reset */
+	if  (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			amdgpu_device_unlock_adev(adev);
+			return -ENODEV;
+		}
+
+		/*
+		 * In XGMI hive mode the device reset is done on all nodes in
+		 * the hive to retrain all XGMI links, hence the reset sequence
+		 * is executed in a loop over all nodes.
+		 */
+		device_list_handle = &hive->device_list;
+	} else {
+		list_add_tail(&adev->gmc.xgmi.head, &device_list);
+		device_list_handle = &device_list;
+	}
+
+retry:	/* Pre-ASIC-reset for the remaining adevs in the XGMI hive. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+		if (tmp_adev == adev)
+			continue;
+
+		amdgpu_device_lock_adev(tmp_adev);
+		r = amdgpu_device_pre_asic_reset(tmp_adev,
+						 NULL,
+						 &need_full_reset);
+		/*TODO Should we stop ?*/
+		if (r) {
+			DRM_ERROR("GPU pre-ASIC-reset failed with error %d for drm dev %s",
+				  r, tmp_adev->ddev->unique);
+			tmp_adev->asic_reset_res = r;
+		}
+	}
+
+	/* Actual ASIC resets if needed. */
+	/* TODO Implement XGMI hive reset logic for SRIOV */
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_device_reset_sriov(adev, job ? false : true);
+		if (r)
+			adev->asic_reset_res = r;
+	} else {
+		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+		if (r && r == -EAGAIN)
+			goto retry;
+	}
+
+	/* Post-ASIC-reset for all devs. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+		if (r) {
+			/* bad news, how to tell it to userspace ? */
+			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+		} else {
+			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+		}
+
+		amdgpu_device_unlock_adev(tmp_adev);
+	}
+
+	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+		mutex_unlock(&hive->hive_lock);
+
+	if (r)
+		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 	return r;
 }
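
Stripped of error handling, the parallel XGMI reset above is a standard
workqueue fan-out/join; a sketch of the pattern (not the exact control flow,
which also handles queue_work() failure and the single-node case):

	/* fan out: kick an async ASIC reset on every node in the hive */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work);

	/* join: wait for each worker and collect per-device results */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		flush_work(&tmp_adev->xgmi_reset_work);
		if (tmp_adev->asic_reset_res)
			r = tmp_adev->asic_reset_res;
	}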
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26d..15ce7e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 			drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
 		if (!adev->mode_info.max_bpc_property)
 			return -ENOMEM;
+		adev->mode_info.abm_level_property =
+			drm_property_create_range(adev->ddev, 0,
+						"abm level", 0, 4);
+		if (!adev->mode_info.abm_level_property)
+			return -ENOMEM;
 	}
 
 	return 0;
@@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
 	if (in_vbl && (*vpos >= vbl_start)) {
 		vtotal = mode->crtc_vtotal;
-		*vpos = *vpos - vtotal;
+
+		/* With variable refresh rate displays the vpos can exceed
+		 * the vtotal value. Clamp to 0 to return -vbl_end instead
+		 * of guessing the remaining number of lines until scanout.
+		 */
+		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
 	}
 
 	/* Correct for shifted end of vbl at vbl_end. */
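
As a worked example of the clamp: with crtc_vtotal = 2200 and a variable
refresh rate front porch stretching the scanout position to vpos = 2250, the
old *vpos - vtotal would have returned +50, as if scanout were 50 lines
away; clamping to 0 means the subsequent vbl_end correction yields -vbl_end,
i.e. the position is still reported as safely inside the vblank.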
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 0000000..be620b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+	/* doorbell mmio */
+	resource_size_t		base;
+	resource_size_t		size;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, doorbells are 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, doorbells
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+	uint32_t kiq;
+	uint32_t mec_ring0;
+	uint32_t mec_ring1;
+	uint32_t mec_ring2;
+	uint32_t mec_ring3;
+	uint32_t mec_ring4;
+	uint32_t mec_ring5;
+	uint32_t mec_ring6;
+	uint32_t mec_ring7;
+	uint32_t userqueue_start;
+	uint32_t userqueue_end;
+	uint32_t gfx_ring0;
+	uint32_t sdma_engine0;
+	uint32_t sdma_engine1;
+	uint32_t sdma_engine2;
+	uint32_t sdma_engine3;
+	uint32_t sdma_engine4;
+	uint32_t sdma_engine5;
+	uint32_t sdma_engine6;
+	uint32_t sdma_engine7;
+	uint32_t ih;
+	union {
+		struct {
+			uint32_t vcn_ring0_1;
+			uint32_t vcn_ring2_3;
+			uint32_t vcn_ring4_5;
+			uint32_t vcn_ring6_7;
+		} vcn;
+		struct {
+			uint32_t uvd_ring0_1;
+			uint32_t uvd_ring2_3;
+			uint32_t uvd_ring4_5;
+			uint32_t uvd_ring6_7;
+			uint32_t vce_ring0_1;
+			uint32_t vce_ring2_3;
+			uint32_t vce_ring4_5;
+			uint32_t vce_ring6_7;
+		} uvd_vce;
+	};
+	uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+	AMDGPU_DOORBELL_KIQ                     = 0x000,
+	AMDGPU_DOORBELL_HIQ                     = 0x001,
+	AMDGPU_DOORBELL_DIQ                     = 0x002,
+	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
+	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
+	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
+	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
+	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
+	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
+	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
+	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
+	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
+	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
+	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
+	AMDGPU_DOORBELL_IH                      = 0x1E8,
+	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
+	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+	/* Compute + GFX: 0~255 */
+	AMDGPU_VEGA20_DOORBELL_KIQ                     = 0x000,
+	AMDGPU_VEGA20_DOORBELL_HIQ                     = 0x001,
+	AMDGPU_VEGA20_DOORBELL_DIQ                     = 0x002,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING0               = 0x003,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING1               = 0x004,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING2               = 0x005,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING3               = 0x006,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING4               = 0x007,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING5               = 0x008,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING6               = 0x009,
+	AMDGPU_VEGA20_DOORBELL_MEC_RING7               = 0x00A,
+	AMDGPU_VEGA20_DOORBELL_USERQUEUE_START	       = 0x00B,
+	AMDGPU_VEGA20_DOORBELL_USERQUEUE_END	       = 0x08A,
+	AMDGPU_VEGA20_DOORBELL_GFX_RING0               = 0x08B,
+	/* SDMA:256~335*/
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0            = 0x100,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1            = 0x10A,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2            = 0x114,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3            = 0x11E,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4            = 0x128,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5            = 0x132,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6            = 0x13C,
+	AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7            = 0x146,
+	/* IH: 376~391 */
+	AMDGPU_VEGA20_DOORBELL_IH                      = 0x178,
+	/* MMSCH: 392~407
+	 * This overlaps the doorbell assignment with VCN as they are mutually
+	 * exclusive. VCE doorbells are 32-bit and two VCE rings share one QWORD.
+	 */
+	AMDGPU_VEGA20_DOORBELL64_VCN0_1                  = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+	AMDGPU_VEGA20_DOORBELL64_VCN2_3                  = 0x189,
+	AMDGPU_VEGA20_DOORBELL64_VCN4_5                  = 0x18A,
+	AMDGPU_VEGA20_DOORBELL64_VCN6_7                  = 0x18B,
+
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1             = 0x188,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3             = 0x189,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5             = 0x18A,
+	AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7             = 0x18B,
+
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1             = 0x18C,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3             = 0x18D,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5             = 0x18E,
+	AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7             = 0x18F,
+	AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT            = 0x18F,
+	AMDGPU_VEGA20_DOORBELL_INVALID                   = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+	/*
+	 * All compute-related doorbells (kiq, hiq, diq, traditional compute
+	 * queues, user queues) must lie in a contiguous range so that
+	 * programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover it.
+	 * Compute-related doorbells are allocated from 0x00 to 0x8a.
+	 */
+
+
+	/* kernel scheduling */
+	AMDGPU_DOORBELL64_KIQ                     = 0x00,
+
+	/* HSA interface queue and debug queue */
+	AMDGPU_DOORBELL64_HIQ                     = 0x01,
+	AMDGPU_DOORBELL64_DIQ                     = 0x02,
+
+	/* Compute engines */
+	AMDGPU_DOORBELL64_MEC_RING0               = 0x03,
+	AMDGPU_DOORBELL64_MEC_RING1               = 0x04,
+	AMDGPU_DOORBELL64_MEC_RING2               = 0x05,
+	AMDGPU_DOORBELL64_MEC_RING3               = 0x06,
+	AMDGPU_DOORBELL64_MEC_RING4               = 0x07,
+	AMDGPU_DOORBELL64_MEC_RING5               = 0x08,
+	AMDGPU_DOORBELL64_MEC_RING6               = 0x09,
+	AMDGPU_DOORBELL64_MEC_RING7               = 0x0a,
+
+	/* User queue doorbell range (128 doorbells) */
+	AMDGPU_DOORBELL64_USERQUEUE_START         = 0x0b,
+	AMDGPU_DOORBELL64_USERQUEUE_END           = 0x8a,
+
+	/* Graphics engine */
+	AMDGPU_DOORBELL64_GFX_RING0               = 0x8b,
+
+	/*
+	 * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+	 * Graphics voltage island aperture 1
+	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+	 */
+
+	/* For vega10 SRIOV, the sdma doorbells must be fixed as follows
+	 * to keep the same setting as the host driver, or conflicts
+	 * will occur.
+	 */
+	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xF0,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
+	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xF2,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,
+
+	/* Interrupt handler */
+	AMDGPU_DOORBELL64_IH                      = 0xF4,  /* For legacy interrupt ring buffer */
+	AMDGPU_DOORBELL64_IH_RING1                = 0xF5,  /* For page migration request log */
+	AMDGPU_DOORBELL64_IH_RING2                = 0xF6,  /* For page migration translation/invalidation log */
+
+	/* VCN engines use 32-bit doorbells */
+	AMDGPU_DOORBELL64_VCN0_1                  = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+	AMDGPU_DOORBELL64_VCN2_3                  = 0xF9,
+	AMDGPU_DOORBELL64_VCN4_5                  = 0xFA,
+	AMDGPU_DOORBELL64_VCN6_7                  = 0xFB,
+
+	/* This overlaps the doorbell assignment with VCN as they are mutually
+	 * exclusive. VCE doorbells are 32-bit and two VCE rings share one QWORD.
+	 */
+	AMDGPU_DOORBELL64_UVD_RING0_1             = 0xF8,
+	AMDGPU_DOORBELL64_UVD_RING2_3             = 0xF9,
+	AMDGPU_DOORBELL64_UVD_RING4_5             = 0xFA,
+	AMDGPU_DOORBELL64_UVD_RING6_7             = 0xFB,
+
+	AMDGPU_DOORBELL64_VCE_RING0_1             = 0xFC,
+	AMDGPU_DOORBELL64_VCE_RING2_3             = 0xFD,
+	AMDGPU_DOORBELL64_VCE_RING4_5             = 0xFE,
+	AMDGPU_DOORBELL64_VCE_RING6_7             = 0xFF,
+
+	AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
+	AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
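
A sketch of how a ring typically commits its write pointer through these
helpers, with the index now coming from the per-device doorbell_index table
instead of a hard-coded enum; whether the wptr is shifted is ring-specific,
so the shift here is purely illustrative:

	if (ring->use_doorbell) {
		/* 64-bit doorbell write, e.g. for an SDMA ring */
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	}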
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 74b611e..9c77eaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 
 /**
  * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
  */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
 /**
@@ -1227,9 +1228,6 @@ static struct drm_driver kms_driver = {
 	.patchlevel = KMS_DRIVER_PATCHLEVEL,
 };
 
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
 static struct pci_driver amdgpu_kms_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,
@@ -1259,16 +1257,14 @@ static int __init amdgpu_init(void)
 		goto error_fence;
 
 	DRM_INFO("amdgpu kernel modesetting enabled.\n");
-	driver = &kms_driver;
-	pdriver = &amdgpu_kms_pci_driver;
-	driver->num_ioctls = amdgpu_max_kms_ioctl;
+	kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
 	/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
 	amdgpu_amdkfd_init();
 
 	/* let modprobe override vga console setting */
-	return pci_register_driver(pdriver);
+	return pci_register_driver(&amdgpu_kms_pci_driver);
 
 error_fence:
 	amdgpu_sync_fini();
@@ -1280,7 +1276,7 @@ static int __init amdgpu_init(void)
 static void __exit amdgpu_exit(void)
 {
 	amdgpu_amdkfd_fini();
-	pci_unregister_driver(pdriver);
+	pci_unregister_driver(&amdgpu_kms_pci_driver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
 	amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf2..ee47c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
 
-	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
-		"cpu addr 0x%p\n", ring->idx,
-		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+		      "0x%016llx, cpu addr 0x%p\n", ring->name,
+		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28..6d11e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
 
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
  *
  * Map the dma_addresses into GART entries (all asics).
  * Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
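
The new third argument threads a flush type down to the GMC backend; the
GART paths above keep the pre-existing behaviour by passing 0, which is
assumed here to remain the legacy full flush (non-zero values selecting
lighter invalidations on capable ASICs):

	/* expands to adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, type) */
	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);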
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff6288..afa2e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
 
 struct amdgpu_gart {
 	struct amdgpu_bo		*bo;
+	/* CPU kmapped address of gart table */
 	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1eb..f4f0021 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo->tbo;
-	tv.shared = true;
+	tv.num_shared = 1;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			return -ENOENT;
 		abo = gem_to_amdgpu_bo(gobj);
 		tv.bo = &abo->tbo;
-		tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+			tv.num_shared = 1;
+		else
+			tv.num_shared = 0;
 		list_add(&tv.head, &list);
 	} else {
 		gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba..f1ddfc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
 /*
  * GEM objects.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8..97a60da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
 
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
@@ -249,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	ring->adev = NULL;
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+	ring->doorbell_index = adev->doorbell_index.kiq;
 
 	r = amdgpu_gfx_kiq_acquire(adev, ring);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c1..f790e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
  */
 #include "clearstate_defs.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
@@ -37,59 +38,6 @@
 #define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
 #define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L
 
-
-struct amdgpu_rlc_funcs {
-	void (*enter_safe_mode)(struct amdgpu_device *adev);
-	void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
-	/* for power gating */
-	struct amdgpu_bo	*save_restore_obj;
-	uint64_t		save_restore_gpu_addr;
-	volatile uint32_t	*sr_ptr;
-	const u32               *reg_list;
-	u32                     reg_list_size;
-	/* for clear state */
-	struct amdgpu_bo	*clear_state_obj;
-	uint64_t		clear_state_gpu_addr;
-	volatile uint32_t	*cs_ptr;
-	const struct cs_section_def   *cs_data;
-	u32                     clear_state_size;
-	/* for cp tables */
-	struct amdgpu_bo	*cp_table_obj;
-	uint64_t		cp_table_gpu_addr;
-	volatile uint32_t	*cp_table_ptr;
-	u32                     cp_table_size;
-
-	/* safe mode for updating CG/PG state */
-	bool in_safe_mode;
-	const struct amdgpu_rlc_funcs *funcs;
-
-	/* for firmware data */
-	u32 save_and_restore_offset;
-	u32 clear_state_descriptor_offset;
-	u32 avail_scratch_ram_locations;
-	u32 reg_restore_list_size;
-	u32 reg_list_format_start;
-	u32 reg_list_format_separate_start;
-	u32 starting_offsets_start;
-	u32 reg_list_format_size_bytes;
-	u32 reg_list_size_bytes;
-	u32 reg_list_format_direct_reg_list_length;
-	u32 save_restore_list_cntl_size_bytes;
-	u32 save_restore_list_gpm_size_bytes;
-	u32 save_restore_list_srm_size_bytes;
-
-	u32 *register_list_format;
-	u32 *register_restore;
-	u8 *save_restore_list_cntl;
-	u8 *save_restore_list_gpm;
-	u8 *save_restore_list_srm;
-
-	bool is_rlc_v2_1;
-};
-
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
 struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef4..81e6070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
 struct amdgpu_gmc_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
+			      uint32_t vmid, uint32_t flush_type);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
 
 struct amdgpu_xgmi {
 	/* from psp */
-	u64 device_id;
+	u64 node_id;
 	u64 hive_id;
 	/* fixed per family */
 	u64 node_segment_size;
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
 	unsigned num_physical_nodes;
 	/* gpu list in the same hive */
 	struct list_head head;
+	bool supported;
 };
 
 struct amdgpu_gmc {
@@ -151,7 +152,7 @@ struct amdgpu_gmc {
 	struct amdgpu_xgmi xgmi;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b7..c48207b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		fence_ctx = 0;
 	}
 
-	if (!ring->ready) {
+	if (!ring->sched.ready) {
 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 			!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
-		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-				    need_ctx_switch);
+		amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
 		need_ctx_switch = false;
 	}
 
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		long tmo;
 
-		if (!ring || !ring->ready)
-			continue;
-
-		/* skip IB tests for KIQ in general for the below reasons:
-		 * 1. We never submit IBs to the KIQ
-		 * 2. KIQ doesn't use the EOP interrupts,
-		 *    we use some other CP interrupt.
+		/* KIQ rings don't have an IB test because we never submit IBs
+		 * to them and they have no interrupt support.
 		 */
-		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		if (!ring->sched.ready || !ring->funcs->test_ib)
 			continue;
 
 		/* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 			tmo = tmo_gfx;
 
 		r = amdgpu_ring_test_ib(ring, tmo);
-		if (r) {
-			ring->ready = false;
+		if (!r) {
+			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+				      ring->name);
+			continue;
+		}
 
-			if (ring == &adev->gfx.gfx_ring[0]) {
-				/* oh, oh, that's really bad */
-				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
-				adev->accel_working = false;
-				return r;
+		ring->sched.ready = false;
+		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+			  ring->name, r);
 
-			} else {
-				/* still not good, but we can live with it */
-				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
-				ret = r;
-			}
+		if (ring == &adev->gfx.gfx_ring[0]) {
+			/* oh, oh, that's really bad */
+			adev->accel_working = false;
+			return r;
+
+		} else {
+			ret = r;
 		}
 	}
 	return ret;
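
The per-ring ready flag moves into the scheduler as ring->sched.ready
throughout this series: a failed IB test above now clears the same flag
that amdgpu_ib_schedule() checks before accepting work, and KIQ rings are
skipped simply because they provide no test_ib callback.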
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93..f877bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
 	u32 (*get_wptr)(struct amdgpu_device *adev);
-	bool (*prescreen_iv)(struct amdgpu_device *adev);
 	void (*decode_iv)(struct amdgpu_device *adev,
 			  struct amdgpu_iv_entry *entry);
 	void (*set_rptr)(struct amdgpu_device *adev);
 };
 
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6..b7968f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 }
 
 /**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
-	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
-						  reset_work);
-
-	if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
-		amdgpu_device_gpu_recover(adev, NULL);
-}
-
-/**
  * amdgpu_irq_disable_all - disable *all* interrupts
  *
  * @adev: amdgpu device pointer
@@ -162,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
 	u32 ring_index = ih->rptr >> 2;
 	struct amdgpu_iv_entry entry;
 
-	/* Prescreening of high-frequency interrupts */
-	if (!amdgpu_ih_prescreen_iv(adev))
-		return;
-
-	/* Before dispatching irq to IP blocks, send it to amdkfd */
-	amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
 	amdgpu_ih_decode_iv(adev, &entry);
 
@@ -262,15 +238,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 				amdgpu_hotplug_work_func);
 	}
 
-	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
-
 	adev->irq.installed = true;
 	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
 	if (r) {
 		adev->irq.installed = false;
 		if (!amdgpu_device_has_dc_support(adev))
 			flush_work(&adev->hotplug_work);
-		cancel_work_sync(&adev->reset_work);
 		return r;
 	}
 	adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +272,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 			pci_disable_msi(adev->pdev);
 		if (!amdgpu_device_has_dc_support(adev))
 			flush_work(&adev->hotplug_work);
-		cancel_work_sync(&adev->reset_work);
 	}
 
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
@@ -392,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 	unsigned client_id = entry->client_id;
 	unsigned src_id = entry->src_id;
 	struct amdgpu_irq_src *src;
+	bool handled = false;
 	int r;
 
 	trace_amdgpu_iv(entry);
 
 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
-		return;
-	}
 
-	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
-		return;
-	}
 
-	if (adev->irq.virq[src_id]) {
+	} else if (adev->irq.virq[src_id]) {
 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
-	} else {
-		if (!adev->irq.client[client_id].sources) {
-			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
-				  client_id, src_id);
-			return;
-		}
 
-		src = adev->irq.client[client_id].sources[src_id];
-		if (!src) {
-			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
-			return;
-		}
+	} else if (!adev->irq.client[client_id].sources) {
+		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+			  client_id, src_id);
 
+	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
 		r = src->funcs->process(adev, src, entry);
-		if (r)
+		if (r < 0)
 			DRM_ERROR("error processing interrupt (%d)\n", r);
+		else if (r)
+			handled = true;
+
+	} else {
+		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
 	}
+
+	/* Send it to amdkfd as well if it isn't already handled */
+	if (!handled)
+		amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
 }
 
 /**
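
With prescreening removed, dispatch relies on a convention for the process() callbacks: return negative on error, zero to pass on the IV, positive to claim it; only unclaimed entries are forwarded to amdkfd. A hypothetical callback under that convention (the EXAMPLE_* names are made up):

	static int example_irq_process(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
	{
		if (entry->src_data[0] != EXAMPLE_SRC_DATA)
			return 0;	/* not ours; amdkfd still gets to see it */

		/* ... handle the interrupt ... */
		return 1;		/* claimed; don't forward to amdkfd */
	}
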
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733..e0af44f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
+	drm_sched_job_cleanup(s_job);
+
 	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
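
The drm_sched_job_cleanup() call reflects the scheduler timeout/recovery rework elsewhere in this merge: drivers now explicitly detach scheduler-side job state from their free_job callback before releasing their own resources. A minimal sketch for a hypothetical driver:

	static void example_free_job(struct drm_sched_job *s_job)
	{
		struct example_job *job = to_example_job(s_job); /* hypothetical */

		/* detach the job from the scheduler first */
		drm_sched_job_cleanup(s_job);

		/* then drop driver-private state */
		dma_fence_put(job->fence);
		kfree(job);
	}
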
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 57cfe78..e1b46a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)		\
 		container_of((sched_job), struct amdgpu_job, base)
 
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
 struct amdgpu_fence;
 
 struct amdgpu_job {
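
AMDGPU_JOB_GET_VMID pairs with the emit_ib() signature change further down in this series: rings now receive the job itself, which is NULL for direct submissions, and the macro folds that NULL check into a VMID of 0. Roughly how an emit_ib implementation would use it (the packet encoding below is illustrative, not a real ring format):

	static void example_ring_emit_ib(struct amdgpu_ring *ring,
					 struct amdgpu_job *job,
					 struct amdgpu_ib *ib,
					 bool ctx_switch)
	{
		unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* 0 when job == NULL */

		amdgpu_ring_write(ring, EXAMPLE_IB_PACKET(vmid)); /* illustrative */
		amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
		amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	}
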
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8f3d44e..bc62bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_GFX:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			if (adev->gfx.gfx_ring[i].ready)
+			if (adev->gfx.gfx_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_COMPUTE:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			if (adev->gfx.compute_ring[i].ready)
+			if (adev->gfx.compute_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
 		for (i = 0; i < adev->sdma.num_instances; i++)
-			if (adev->sdma.instance[i].ring.ready)
+			if (adev->sdma.instance[i].ring.sched.ready)
 				++num_rings;
 		ib_start_alignment = 256;
 		ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->uvd.harvest_config & (1 << i))
 				continue;
 
-			if (adev->uvd.inst[i].ring.ready)
+			if (adev->uvd.inst[i].ring.sched.ready)
 				++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCE:
 		type = AMD_IP_BLOCK_TYPE_VCE;
 		for (i = 0; i < adev->vce.num_rings; i++)
-			if (adev->vce.ring[i].ready)
+			if (adev->vce.ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 4;
 		ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 				continue;
 
 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
-				if (adev->uvd.inst[i].ring_enc[j].ready)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
 					++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_dec.ready)
+		if (adev->vcn.ring_dec.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
 		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.ring_enc[i].ready)
+			if (adev->vcn.ring_enc[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_jpeg.ready)
+		if (adev->vcn.ring_jpeg.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 		if (r)
 			goto error_vm;
 	}
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
-	amdgpu_vm_fini(adev, &fpriv->vm);
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	if (pasid)
 		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d1b4d9b..aadd0fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/hrtimer.h>
@@ -57,7 +56,6 @@ struct amdgpu_hpd;
 #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
 #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
 #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x)	container_of(x, struct amdgpu_plane, base)
 
 #define to_dm_plane_state(x)	container_of(x, struct dm_plane_state, base);
 
@@ -295,13 +293,6 @@ struct amdgpu_display_funcs {
 			      uint16_t connector_object_id,
 			      struct amdgpu_hpd *hpd,
 			      struct amdgpu_router *router);
-	/* it is used to enter or exit into free sync mode */
-	int (*notify_freesync)(struct drm_device *dev, void *data,
-			       struct drm_file *filp);
-	/* it is used to allow enablement of freesync mode */
-	int (*set_freesync_property)(struct drm_connector *connector,
-				     struct drm_property *property,
-				     uint64_t val);
 
 
 };
@@ -325,7 +316,7 @@ struct amdgpu_mode_info {
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
 	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
-	struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+	struct drm_plane *planes[AMDGPU_MAX_PLANES];
 	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
@@ -341,6 +332,8 @@ struct amdgpu_mode_info {
 	struct drm_property *dither_property;
 	/* maximum number of bits per channel for monitor color */
 	struct drm_property *max_bpc_property;
+	/* Adaptive Backlight Modulation (power feature) */
+	struct drm_property *abm_level_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
@@ -436,11 +429,6 @@ struct amdgpu_crtc {
 	struct drm_pending_vblank_event *event;
 };
 
-struct amdgpu_plane {
-	struct drm_plane base;
-	enum drm_plane_type plane_type;
-};
-
 struct amdgpu_encoder_atom_dig {
 	bool linkb;
 	/* atom dig */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 904014d..fd271f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 		amdgpu_bo_subtract_pin_size(bo);
 
 	if (bo->kfd_bo)
-		amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+		amdgpu_amdkfd_unreserve_memory_limit(bo);
 
 	amdgpu_bo_kunmap(bo);
 
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
-			       struct amdgpu_ring *ring,
-			       struct amdgpu_bo *bo,
-			       struct reservation_object *resv,
-			       struct dma_fence **fence,
-			       bool direct)
-
-{
-	struct amdgpu_bo *shadow = bo->shadow;
-	uint64_t bo_addr, shadow_addr;
-	int r;
-
-	if (!shadow)
-		return -EINVAL;
-
-	bo_addr = amdgpu_bo_gpu_offset(bo);
-	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
-	r = reservation_object_reserve_shared(bo->tbo.resv);
-	if (r)
-		goto err;
-
-	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
-			       amdgpu_bo_size(bo), resv, fence,
-			       direct, false);
-	if (!r)
-		amdgpu_bo_fence(bo, *fence, true);
-
-err:
-	return r;
-}
-
-/**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d..9291c2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
-			       struct amdgpu_ring *ring,
-			       struct amdgpu_bo *bo,
-			       struct reservation_object *resv,
-			       struct dma_fence **fence, bool direct);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
 			     struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678..1f61ed9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
 
 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
 
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;
 
+	/* Skip fan attributes on APU */
+	if ((adev->flags & AMD_IS_APU) &&
+	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+		return 0;
+
 	/* Skip limit attributes if DPM is not enabled */
 	if (!adev->pm.dpm_enabled &&
 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 		mutex_unlock(&adev->pm.mutex);
 	}
+	/* enable/disable Low Memory PState for UVD (4k videos) */
+	if (adev->asic_type == CHIP_STONEY &&
+		adev->uvd.decode_image_width >= WIDTH_4K) {
+		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+		if (hwmgr && hwmgr->hwmgr_func &&
+		    hwmgr->hwmgr_func->update_nbdpm_pstate)
+			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+							       !enable,
+							       true);
+	}
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
@@ -2129,7 +2155,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->ready)
+		if (ring && ring->sched.ready)
 			amdgpu_fence_wait_empty(ring);
 	}
 
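The Stoney hunk above also shows the usual powerplay calling convention: reach through pp_handle and guard every level, since the hwmgr, its function table, and the individual callback can each be absent depending on ASIC and firmware. Reduced to the bare pattern:

	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* each level may be NULL; only call when the whole chain exists */
	if (hwmgr && hwmgr->hwmgr_func &&
	    hwmgr->hwmgr_func->update_nbdpm_pstate)
		hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, !enable, true);
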
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index e45e929..71913a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
  * implementation
@@ -332,15 +330,13 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
 	return ret;
 }
 
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
 	.attach = amdgpu_gem_map_attach,
 	.detach = amdgpu_gem_map_detach,
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
-	.map = drm_gem_dmabuf_kmap,
-	.unmap = drm_gem_dmabuf_kunmap,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 25d2f3e..6759d89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle)
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
 	adev->psp.asd_fw = NULL;
+	release_firmware(adev->psp.ta_fw);
+	adev->psp.ta_fw = NULL;
 	return 0;
 }
 
@@ -118,22 +120,26 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 static int
 psp_cmd_submit_buf(struct psp_context *psp,
 		   struct amdgpu_firmware_info *ucode,
-		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
-		   int index)
+		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
 	int ret;
+	int index;
 
 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
+	index = atomic_inc_return(&psp->fence_value);
 	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
 			     fence_mc_addr, index);
-
-	while (*((unsigned int *)psp->fence_buf) != index) {
-		msleep(1);
+	if (ret) {
+		atomic_dec(&psp->fence_value);
+		return ret;
 	}
 
+	while (*((unsigned int *)psp->fence_buf) != index)
+		msleep(1);
+
 	/* the status field must be 0 after FW is loaded */
 	if (ucode && psp->cmd_buf_mem->resp.status) {
 		DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
@@ -149,10 +155,22 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	return ret;
 }
 
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+bool psp_support_vmr_ring(struct psp_context *psp)
+{
+	return amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045;
+}
+
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+				 struct psp_gfx_cmd_resp *cmd,
 				 uint64_t tmr_mc, uint32_t size)
 {
-	cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+	if (psp_support_vmr_ring(psp))
+		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+	else
+		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
 	cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -186,12 +204,12 @@ static int psp_tmr_load(struct psp_context *psp)
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
 	DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
 			PSP_TMR_SIZE, psp->tmr_mc_addr);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr, 1);
+				 psp->fence_buf_mc_addr);
 	if (ret)
 		goto failed;
 
@@ -258,13 +276,194 @@ static int psp_asd_load(struct psp_context *psp)
 			     psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr, 2);
+				 psp->fence_buf_mc_addr);
 
 	kfree(cmd);
 
 	return ret;
 }
 
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					  uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+					  uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+	cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
+	 * physical) for xgmi ta <-> Driver
+	 */
+	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->xgmi_context.xgmi_shared_bo,
+				      &psp->xgmi_context.xgmi_shared_mc_addr,
+				      &psp->xgmi_context.xgmi_shared_buf);
+
+	return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the loading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+	psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+				      psp->xgmi_context.xgmi_shared_mc_addr,
+				      psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	if (!ret) {
+		psp->xgmi_context.initialized = 1;
+		psp->xgmi_context.session_id = cmd->resp.session_id;
+	}
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					    uint32_t xgmi_session_id)
+{
+	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+	cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the unloading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+					    uint32_t ta_cmd_id,
+					    uint32_t xgmi_session_id)
+{
+	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+	cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+	/* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	/*
+	 * TODO: bypass the loading in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+					psp->xgmi_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd,
+				 psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+	int ret;
+
+	if (!psp->xgmi_context.initialized)
+		return 0;
+
+	ret = psp_xgmi_unload(psp);
+	if (ret)
+		return ret;
+
+	psp->xgmi_context.initialized = 0;
+
+	/* free xgmi shared memory */
+	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+			&psp->xgmi_context.xgmi_shared_mc_addr,
+			&psp->xgmi_context.xgmi_shared_buf);
+
+	return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
+
+	if (!psp->xgmi_context.initialized) {
+		ret = psp_xgmi_init_shared_buf(psp);
+		if (ret)
+			return ret;
+	}
+
+	/* Load XGMI TA */
+	ret = psp_xgmi_load(psp);
+	if (ret)
+		return ret;
+
+	/* Initialize XGMI session */
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+	return ret;
+}
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -292,6 +491,15 @@ static int psp_hw_start(struct psp_context *psp)
 	if (ret)
 		return ret;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		ret = psp_xgmi_initialize(psp);
+		/* Warn about an XGMI session initialization failure
+		 * instead of stopping driver initialization.
+		 */
+		if (ret)
+			dev_err(psp->adev->dev,
+				"XGMI: Failed to initialize XGMI session\n");
+	}
 	return 0;
 }
 
@@ -321,7 +529,7 @@ static int psp_np_fw_load(struct psp_context *psp)
 			return ret;
 
 		ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
-					 psp->fence_buf_mc_addr, i + 3);
+					 psp->fence_buf_mc_addr);
 		if (ret)
 			return ret;
 
@@ -340,8 +548,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	int ret;
 	struct psp_context *psp = &adev->psp;
 
-	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+		psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 		goto skip_memalloc;
+	}
 
 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!psp->cmd)
@@ -452,6 +662,10 @@ static int psp_hw_fini(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    psp->xgmi_context.initialized == 1)
+		psp_xgmi_terminate(psp);
+
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -479,6 +693,15 @@ static int psp_suspend(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+	    psp->xgmi_context.initialized == 1) {
+		ret = psp_xgmi_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate xgmi ta\n");
+			return ret;
+		}
+	}
+
 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
 	if (ret) {
 		DRM_ERROR("PSP ring stop failed\n");
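
The PSP changes drop the hard-coded fence indices (1 for TMR, 2 for ASD, i + 3 for the non-PSP firmwares) in favour of a per-context counter: each submission takes atomic_inc_return(&psp->fence_value) as its index and gives it back on a failed submit, then polls the fence buffer for that value. The core of the pattern:

	int index = atomic_inc_return(&psp->fence_value);

	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
			     fence_mc_addr, index);
	if (ret) {
		/* the command never reached the PSP; reuse the index */
		atomic_dec(&psp->fence_value);
		return ret;
	}

	/* the PSP writes the index to the fence buffer on completion */
	while (*(unsigned int *)psp->fence_buf != index)
		msleep(1);
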
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8b8720e..10decf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,17 @@
 
 #include "amdgpu.h"
 #include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
 
 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
-#define PSP_ASD_SHARED_MEM_SIZE	0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
 #define PSP_1_MEG		0x100000
 #define PSP_TMR_SIZE	0x400000
 
 struct psp_context;
+struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
 
 enum psp_ring_type
@@ -80,12 +83,20 @@ struct psp_funcs
 				  enum AMDGPU_UCODE_ID ucode_type);
 	bool (*smu_reload_quirk)(struct psp_context *psp);
 	int (*mode1_reset)(struct psp_context *psp);
-	uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
+	uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
 	uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
 	int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
-			struct psp_xgmi_topology_info *topology);
+				      struct psp_xgmi_topology_info *topology);
 	int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
-			struct psp_xgmi_topology_info *topology);
+				      struct psp_xgmi_topology_info *topology);
+};
+
+struct psp_xgmi_context {
+	uint8_t				initialized;
+	uint32_t			session_id;
+	struct amdgpu_bo                *xgmi_shared_bo;
+	uint64_t                        xgmi_shared_mc_addr;
+	void                            *xgmi_shared_buf;
 };
 
 struct psp_context
@@ -96,7 +107,7 @@ struct psp_context
 
 	const struct psp_funcs		*funcs;
 
-	/* fence buffer */
+	/* firmware buffer */
 	struct amdgpu_bo		*fw_pri_bo;
 	uint64_t			fw_pri_mc_addr;
 	void				*fw_pri_buf;
@@ -134,6 +145,16 @@ struct psp_context
 	struct amdgpu_bo		*cmd_buf_bo;
 	uint64_t			cmd_buf_mc_addr;
 	struct psp_gfx_cmd_resp		*cmd_buf_mem;
+
+	/* fence value associated with cmd buffer */
+	atomic_t			fence_value;
+
+	/* xgmi ta firmware and buffer */
+	const struct firmware		*ta_fw;
+	uint32_t			ta_xgmi_ucode_version;
+	uint32_t			ta_xgmi_ucode_size;
+	uint8_t				*ta_xgmi_start_addr;
+	struct psp_xgmi_context		xgmi_context;
 };
 
 struct amdgpu_psp_funcs {
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs {
 					enum AMDGPU_UCODE_ID);
 };
 
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES		64
+struct psp_xgmi_node_info {
+	uint64_t				node_id;
+	uint8_t					num_hops;
+	uint8_t					is_sharing_enabled;
+	enum ta_xgmi_assigned_sdma_engine	sdma_engine;
+};
+
 struct psp_xgmi_topology_info {
-	/* Generated by PSP to identify the GPU instance within xgmi connection */
-	uint64_t			device_id;
-	/*
-	 * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
-	 * connection vector topology, but not access enable the connections
-	 * if some or all bits are set to 1, driver indicates it want to retrieve the
-	 * current xgmi topology and  access enable the link to GPU[i] associated
-	 * with the bit position in the  vector.
-	 * On return,: bits indicated which xgmi links are present/active depending
-	 * on the  value passed in. The relative bit offset for the  relative GPU index
-	 * within the  hive is always marked active.
-	 */
-	uint32_t			connection_mask;
-	uint32_t			reserved; /* must be  0 */
+	uint32_t			num_nodes;
+	struct psp_xgmi_node_info	nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };
 
 #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info {
 		((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
 #define psp_mode1_reset(psp) \
 		((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
-		((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp) \
+		((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
 #define psp_xgmi_get_hive_id(psp) \
 		((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
 #define psp_xgmi_get_topology_info(psp, num_device, topology) \
@@ -199,6 +216,9 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+bool psp_support_vmr_ring(struct psp_context *psp);
+
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85e..335a0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	/* Not to finish a ring which is not initialized */
 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 {
 	ktime_t deadline = ktime_add_us(ktime_get(), 10000);
 
-	if (!ring->funcs->soft_recovery)
+	if (!ring->funcs->soft_recovery || !fence)
 		return false;
 
 	atomic_inc(&ring->adev->gpu_reset_counter);
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
 	debugfs_remove(ring->ent);
 #endif
 }
+
+/**
+ * amdgpu_ring_test_helper - test the ring and set the scheduler readiness status
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets the scheduler readiness status to match the result.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int r;
+
+	r = amdgpu_ring_test_ring(ring);
+	if (r)
+		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+			      ring->name, r);
+	else
+		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+			      ring->name);
+
+	ring->sched.ready = !r;
+	return r;
+}
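
amdgpu_ring_test_helper() lets IP blocks stop open-coding the test-then-set-ready sequence, which is why so many error paths in this series shrink. A hw_init path can collapse to something like this hypothetical sketch:

	static int example_hw_init(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		struct amdgpu_ring *ring = &adev->example.ring; /* hypothetical */

		/* tests the ring and sets ring->sched.ready to match */
		return amdgpu_ring_test_helper(ring);
	}
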
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301..0beb01f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
 	unsigned emit_ib_size;
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_job *job,
 			struct amdgpu_ib *ib,
-			unsigned vmid, bool ctx_switch);
+			bool ctx_switch);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
 	uint64_t		gpu_addr;
 	uint64_t		ptr_mask;
 	uint32_t		buf_mask;
-	bool			ready;
 	u32			idx;
 	u32			me;
 	u32			pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
 	ring->count_dw -= count_dw;
 }
 
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 0000000..c8793e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if the RLC is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+	if (adev->gfx.rlc.in_safe_mode)
+		return;
+
+	/* if RLC is not enabled, do nothing */
+	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+		return;
+
+	if (adev->cg_flags &
+	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+		adev->gfx.rlc.funcs->set_safe_mode(adev);
+		adev->gfx.rlc.in_safe_mode = true;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Take the RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if the RLC is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+	if (!(adev->gfx.rlc.in_safe_mode))
+		return;
+
+	/* if RLC is not enabled, do nothing */
+	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+		return;
+
+	if (adev->cg_flags &
+	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+		adev->gfx.rlc.funcs->unset_safe_mode(adev);
+		adev->gfx.rlc.in_safe_mode = false;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: the size of save restore block
+ *
+ * Allocate and set up the RLC save restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+	const u32 *src_ptr;
+	volatile u32 *dst_ptr;
+	u32 i;
+	int r;
+
+	/* allocate save restore block */
+	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.save_restore_obj,
+				      &adev->gfx.rlc.save_restore_gpu_addr,
+				      (void **)&adev->gfx.rlc.sr_ptr);
+	if (r) {
+		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* write the sr buffer */
+	src_ptr = adev->gfx.rlc.reg_list;
+	dst_ptr = adev->gfx.rlc.sr_ptr;
+	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and set up the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+	volatile u32 *dst_ptr;
+	u32 dws;
+	int r;
+
+	/* allocate clear state block */
+	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.clear_state_obj,
+				      &adev->gfx.rlc.clear_state_gpu_addr,
+				      (void **)&adev->gfx.rlc.cs_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* set up the cs buffer */
+	dst_ptr = adev->gfx.rlc.cs_ptr;
+	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and set up the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->gfx.rlc.cp_table_obj,
+				      &adev->gfx.rlc.cp_table_gpu_addr,
+				      (void **)&adev->gfx.rlc.cp_table_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+		amdgpu_gfx_rlc_fini(adev);
+		return r;
+	}
+
+	/* set up the cp table */
+	amdgpu_gfx_rlc_setup_cp_table(adev);
+	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+	return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the buffer of the CP table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write CP firmware data into the CP table.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+	const __le32 *fw_data;
+	volatile u32 *dst_ptr;
+	int me, i, max_me;
+	u32 bo_offset = 0;
+	u32 table_offset, table_size;
+
+	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+	/* write the cp table buffer */
+	dst_ptr = adev->gfx.rlc.cp_table_ptr;
+	for (me = 0; me < max_me; me++) {
+		if (me == 0) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.ce_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 1) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.pfp_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 2) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.me_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 3) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 4) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec2_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		}
+
+		for (i = 0; i < table_size; i++) {
+			dst_ptr[bo_offset + i] =
+				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+		}
+
+		bo_offset += table_size;
+	}
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save restore block, clear state block
+ * and jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+	/* save restore block */
+	if (adev->gfx.rlc.save_restore_obj) {
+		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+				      &adev->gfx.rlc.save_restore_gpu_addr,
+				      (void **)&adev->gfx.rlc.sr_ptr);
+	}
+
+	/* clear state block */
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+			      &adev->gfx.rlc.clear_state_gpu_addr,
+			      (void **)&adev->gfx.rlc.cs_ptr);
+
+	/* jump table block */
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+			      &adev->gfx.rlc.cp_table_gpu_addr,
+			      (void **)&adev->gfx.rlc.cp_table_ptr);
+}
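
The enter/exit pair is meant to bracket CG/PG register updates, and both sides return early when the RLC is already in, or already out of, safe mode. A caller updating clockgating state would wrap its register writes roughly like this sketch:

	static void example_update_medium_grain_cg(struct amdgpu_device *adev,
						   bool enable)
	{
		amdgpu_gfx_rlc_enter_safe_mode(adev);

		/* ... program the MGCG registers while the RLC is quiesced ... */

		amdgpu_gfx_rlc_exit_safe_mode(adev);
	}
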
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 0000000..49a8ab5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+	void (*set_safe_mode)(struct amdgpu_device *adev);
+	void (*unset_safe_mode)(struct amdgpu_device *adev);
+	int  (*init)(struct amdgpu_device *adev);
+	u32  (*get_csb_size)(struct amdgpu_device *adev);
+	void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+	int  (*get_cp_table_num)(struct amdgpu_device *adev);
+	int  (*resume)(struct amdgpu_device *adev);
+	void (*stop)(struct amdgpu_device *adev);
+	void (*reset)(struct amdgpu_device *adev);
+	void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+	/* for power gating */
+	struct amdgpu_bo        *save_restore_obj;
+	uint64_t                save_restore_gpu_addr;
+	volatile uint32_t       *sr_ptr;
+	const u32               *reg_list;
+	u32                     reg_list_size;
+	/* for clear state */
+	struct amdgpu_bo        *clear_state_obj;
+	uint64_t                clear_state_gpu_addr;
+	volatile uint32_t       *cs_ptr;
+	const struct cs_section_def   *cs_data;
+	u32                     clear_state_size;
+	/* for cp tables */
+	struct amdgpu_bo        *cp_table_obj;
+	uint64_t                cp_table_gpu_addr;
+	volatile uint32_t       *cp_table_ptr;
+	u32                     cp_table_size;
+
+	/* safe mode for updating CG/PG state */
+	bool in_safe_mode;
+	const struct amdgpu_rlc_funcs *funcs;
+
+	/* for firmware data */
+	u32 save_and_restore_offset;
+	u32 clear_state_descriptor_offset;
+	u32 avail_scratch_ram_locations;
+	u32 reg_restore_list_size;
+	u32 reg_list_format_start;
+	u32 reg_list_format_separate_start;
+	u32 starting_offsets_start;
+	u32 reg_list_format_size_bytes;
+	u32 reg_list_size_bytes;
+	u32 reg_list_format_direct_reg_list_length;
+	u32 save_restore_list_cntl_size_bytes;
+	u32 save_restore_list_gpm_size_bytes;
+	u32 save_restore_list_srm_size_bytes;
+
+	u32 *register_list_format;
+	u32 *register_restore;
+	u8 *save_restore_list_cntl;
+	u8 *save_restore_list_gpm;
+	u8 *save_restore_list_srm;
+
+	bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index bc9244b..115bb0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -28,17 +28,31 @@
  * GPU SDMA IP block helper functions.
  */
 
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++)
-		if (&adev->sdma.instance[i].ring == ring)
-			break;
+		if (ring == &adev->sdma.instance[i].ring ||
+		    ring == &adev->sdma.instance[i].page)
+			return &adev->sdma.instance[i];
 
-	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma.instance[i];
-	else
-		return NULL;
+	return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (ring == &adev->sdma.instance[i].ring ||
+		    ring == &adev->sdma.instance[i].page) {
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
 }
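
Both helpers treat an instance's new page-queue ring as belonging to the same instance as its main ring. Callers that only need the instance number, say to index per-instance registers, use the index variant and must handle the error case; a hypothetical caller:

	uint32_t instance;

	if (amdgpu_sdma_get_index_from_ring(ring, &instance))
		return -EINVAL;	/* not an SDMA ring */

	/* EXAMPLE_SDMA_REG(i) stands in for a real per-instance register */
	WREG32(EXAMPLE_SDMA_REG(instance), value);
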
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 500113e..16b1a6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	struct amdgpu_ring	page;
 	bool			burst_nop;
 };
 
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
 	uint32_t                    srbm_soft_reset;
+	bool			has_page_queue;
 };
 
 /*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 
 struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e9bf70e..626abca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_ARGS(vm, ring, job),
 	    TP_STRUCT__entry(
 			     __field(u32, pasid)
+			     __string(ring, ring->name)
 			     __field(u32, ring)
 			     __field(u32, vmid)
 			     __field(u32, vm_hub)
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 
 	    TP_fast_assign(
 			   __entry->pasid = vm->pasid;
-			   __entry->ring = ring->idx;
+			   __assign_str(ring, ring->name)
 			   __entry->vmid = job->vmid;
 			   __entry->vm_hub = ring->funcs->vmhub,
 			   __entry->pd_addr = job->vm_pd_addr;
 			   __entry->needs_flush = job->vm_needs_flush;
 			   ),
-	    TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-		      __entry->pasid, __entry->ring, __entry->vmid,
+	    TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+		      __entry->pasid, __get_str(ring), __entry->vmid,
 		      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
 
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush,
 		     uint64_t pd_addr),
 	    TP_ARGS(ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
-			     __field(u32, ring)
+			     __string(ring, ring->name)
 			     __field(u32, vmid)
 			     __field(u32, vm_hub)
 			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->ring = ring->idx;
+			   __assign_str(ring, ring->name)
 			   __entry->vmid = vmid;
 			   __entry->vm_hub = ring->funcs->vmhub;
 			   __entry->pd_addr = pd_addr;
 			   ),
-	    TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-		      __entry->ring, __entry->vmid,
+	    TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+		      __get_str(ring), __entry->vmid,
 		      __entry->vm_hub,__entry->pd_addr)
 );
 
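Replacing the numeric ring index with the ring name relies on the tracepoint framework's dynamic string fields: __string() reserves space sized from the source string, __assign_str() copies it, and __get_str() reads it back in TP_printk(). The three always travel together, as in this reduced sketch:

	TRACE_EVENT(example_ring_event,
		    TP_PROTO(struct amdgpu_ring *ring),
		    TP_ARGS(ring),
		    TP_STRUCT__entry(
				     __string(ring, ring->name)	/* reserve */
				     ),
		    TP_fast_assign(
				   __assign_str(ring, ring->name)	/* copy */
				   ),
		    TP_printk("ring=%s", __get_str(ring))
	);
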
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a44fc12..c91ec31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	/* ensure reference is false in case init fails */
-	adev->mman.mem_global_referenced = false;
-
-	global_ref = &adev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &amdgpu_ttm_mem_global_init;
-	global_ref->release = &amdgpu_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		goto error_mem;
-	}
-
-	adev->mman.bo_global_ref.mem_glob =
-		adev->mman.mem_global_ref.object;
-	global_ref = &adev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		goto error_bo;
-	}
-
-	mutex_init(&adev->mman.gtt_window_lock);
-
-	adev->mman.mem_global_referenced = true;
-
-	return 0;
-
-error_bo:
-	drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
-	return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
-	if (adev->mman.mem_global_referenced) {
-		mutex_destroy(&adev->mman.gtt_window_lock);
-		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		adev->mman.mem_global_referenced = false;
-	}
-}
-
 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	int r;
 	u64 vis_vram_limit;
 
-	/* initialize global references for vram/gtt */
-	r = amdgpu_ttm_global_init(adev);
-	if (r) {
-		return r;
-	}
+	mutex_init(&adev->mman.gtt_window_lock);
+
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&adev->mman.bdev,
-			       adev->mman.bo_global_ref.ref.object,
 			       &amdgpu_bo_driver,
 			       adev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET,
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
 }
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	unsigned i;
 	int r;
 
-	if (direct_submit && !ring->ready) {
+	if (direct_submit && !ring->sched.ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index fe8f276..b5b2d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,8 +39,6 @@
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
 
 struct amdgpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
 	struct ttm_bo_device		bdev;
 	bool				mem_global_referenced;
 	bool				initialized;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index aa6641b..7ac25a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 {
 };
 
 /* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ta_xgmi_ucode_version;
+	uint32_t ta_xgmi_offset_bytes;
+	uint32_t ta_xgmi_size_bytes;
+	uint32_t ta_ras_ucode_version;
+	uint32_t ta_ras_offset_bytes;
+	uint32_t ta_ras_size_bytes;
+};
+
+/* version_major=1, version_minor=0 */
 struct gfx_firmware_header_v1_0 {
 	struct common_firmware_header header;
 	uint32_t ucode_feature_version;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
 	struct mc_firmware_header_v1_0 mc;
 	struct smc_firmware_header_v1_0 smc;
 	struct psp_firmware_header_v1_0 psp;
+	struct ta_firmware_header_v1_0 ta;
 	struct gfx_firmware_header_v1_0 gfx;
 	struct rlc_firmware_header_v1_0 rlc;
 	struct rlc_firmware_header_v2_0 rlc_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6..4e5d13e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
 	buf_sizes[0x1] = dpb_size;
 	buf_sizes[0x2] = image_size;
 	buf_sizes[0x4] = min_ctx_size;
+	/* store image width to adjust nb memory pstate */
+	adev->uvd.decode_image_width = width;
 	return 0;
 }
 
@@ -1243,30 +1245,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
-	uint32_t ip_instance = ring->me;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
-	} else {
-		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
 
 	dma_fence_put(fence);
 
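
The pattern replacing the DRM_ERROR chains here (and in the VCE, VCN, SDMA and GFX tests below) leans on the return convention of dma_fence_wait_timeout(): positive means signaled with that much timeout left, 0 means timeout, negative is an errno such as -ERESTARTSYS. The per-site prints can go because the common IB-test caller already reports which ring failed. Schematically:

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)		/* not signaled within the timeout */
		r = -ETIMEDOUT;
	else if (r > 0)		/* signaled; r was the jiffies left */
		r = 0;
	/* r < 0 is already an error code, pass it through */
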
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a4..5eb6328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
 	struct drm_sched_entity entity;
 	struct delayed_work	idle_work;
 	unsigned		harvest_config;
+	/* store image width to adjust nb memory pstate */
+	unsigned		decode_image_width;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f540..98a1b2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
+				struct amdgpu_ib *ib,
+				bool ctx_switch)
 {
 	amdgpu_ring_write(ring, VCE_CMD_IB);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 		return 0;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, VCE_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		return 0;
 
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index a1f209e..5029365 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+				struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
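
Threading struct amdgpu_job through emit_ib (here for VCE, and repeated for every ring backend below) lets each backend derive per-job state such as the VMID itself instead of taking it as a loose parameter; VCE happens not to need it. Backends that do need it use the AMDGPU_JOB_GET_VMID() helper added by this series, which tolerates internally generated IBs that carry no job. A sketch, with a hypothetical backend name:

	/* helper, roughly: a NULL job means an internal IB, VMID 0 */
	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

	static void foo_ring_emit_ib(struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_ib *ib,
				     bool ctx_switch)
	{
		unsigned vmid = AMDGPU_JOB_GET_VMID(job);
		/* ... emit the IB packet tagged with vmid ... */
	}
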
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27da13d..e2e42e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
 
 	dma_fence_put(fence);
-
 error:
 	return r;
 }
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-				  ring->idx, r);
+	if (r)
 		return r;
-	}
 
 	amdgpu_ring_write(ring,
 		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-				  ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-				  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
 	return r;
 }
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r = 0;
 
 	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
 		goto error;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto error;
-	} else
+	} else {
 		r = 0;
+	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout)
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-	else {
-		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
 	dma_fence_put(fence);
-
 error:
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358a..462a04e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-	addr -= AMDGPU_VA_RESERVED_SIZE;
-	addr = amdgpu_gmc_sign_extend(addr);
-
-	return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
-	int r;
-	void *ptr;
-
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
-				&adev->virt.csa_vmid0_addr, &ptr);
-	if (r)
-		return r;
-
-	memset(ptr, 0, AMDGPU_CSA_SIZE);
-	return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-						&adev->virt.csa_vmid0_addr,
-						NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va)
-{
-	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &adev->virt.csa_obj->tbo;
-	csa_tv.shared = true;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
-	}
-
-	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
-	}
-
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				AMDGPU_CSA_SIZE);
-	if (r) {
-		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
-
-	if (r) {
-		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_read;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_write;
 
-	if (in_interrupt())
-		might_sleep();
-
+	might_sleep();
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+					    ref, mask);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for IRQ context */
+	if (r < 1 && in_interrupt())
+		goto failed_kiq;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return;
+
+failed_kiq:
+	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @amdgpu:	amdgpu device.
@@ -390,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 
 	if (adev->fw_vram_usage.va != NULL) {
 		adev->virt.fw_reserve.p_pf2vf =
-			(struct amdgim_pf2vf_info_header *)(
+			(struct amd_sriov_msg_pf2vf_info_header *)(
 			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
 		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
 		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
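
The new amdgpu_virt_kiq_reg_write_reg_wait() bundles a register write plus a masked poll of a second register into one KIQ submission, so an SR-IOV guest cannot be preempted by another VF between the two accesses. Roughly how the GMC TLB-flush path is expected to use it (the invalidate-engine register names are illustrative of that call site):

	/* reg0: write the invalidate request; reg1: poll for the ack bit */
	amdgpu_virt_kiq_reg_write_reg_wait(adev,
					   hub->vm_inv_eng0_req + eng,
					   hub->vm_inv_eng0_ack + eng,
					   req, 1 << vmid);
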
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac11..722deef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
  * Firmware Reserve Frame buffer
  */
 struct amdgpu_virt_fw_reserve {
-	struct amdgim_pf2vf_info_header *p_pf2vf;
-	struct amdgim_vf2pf_info_header *p_vf2pf;
+	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
 	unsigned int checksum_key;
 };
 /*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
 };
 
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
 	/* the total structure size in byte. */
 	uint32_t size;
 	/* version of this structure, written by the GIM */
 	uint32_t version;
+	/* reserved */
+	uint32_t reserved[2];
 } __aligned(4);
 struct  amdgim_pf2vf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
+	struct amd_sriov_msg_pf2vf_info_header header;
 	/* max_width * max_height */
 	unsigned int uvd_enc_max_pixels_count;
 	/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct  amdgim_pf2vf_info_v1 {
 
 struct  amdgim_pf2vf_info_v2 {
 	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
+	struct amd_sriov_msg_pf2vf_info_header header;
 	/* use private key from mailbox 2 to create checksum */
 	uint32_t checksum;
 	/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct  amdgim_pf2vf_info_v2 {
 	uint64_t vcefw_kboffset;
 	/* VCE FW size in KB */
 	uint32_t vcefw_ksize;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
 } __aligned(4);
 
 
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
 	/* the total structure size in byte. */
 	uint32_t size;
 	/* version of this structure, written by the guest */
 	uint32_t version;
+	/* reserved */
+	uint32_t reserved[2];
 } __aligned(4);
 
 struct amdgim_vf2pf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	/* driver version */
 	char driver_version[64];
 	/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
 
 struct amdgim_vf2pf_info_v2 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	uint32_t checksum;
 	/* driver version */
 	uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
 	uint32_t uvd_enc_usage;
 	/* guest uvd engine usage percentage. 0xffff means N/A. */
 	uint32_t uvd_enc_health;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
 } __aligned(4);
 
 #define AMDGPU_FW_VRAM_VF2PF_VER 2
@@ -238,7 +242,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
 struct amdgpu_virt {
 	uint32_t			caps;
 	struct amdgpu_bo		*csa_obj;
-	uint64_t			csa_vmid0_addr;
 	bool chained_ib_support;
 	uint32_t			reg_val_offs;
 	struct amdgpu_irq_src		ack_irq;
@@ -251,8 +254,6 @@ struct amdgpu_virt {
 	uint32_t gim_feature;
 };
 
-#define AMDGPU_CSA_SIZE		(8 * 1024)
-
 #define amdgpu_sriov_enabled(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
 
@@ -277,17 +278,13 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
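
Besides the amdgim_ -> amd_sriov_msg_ rename (the headers are now a cross-component message format, padded with two reserved words for future growth), the exchange region is still accessed through the AMDGPU_FW_VRAM_PF2VF_READ() macro used in amdgpu_virt_init_data_exchange() above. A minimal sketch of version-gated reads (the v2 field use is illustrative):

	uint32_t version, checksum;

	AMDGPU_FW_VRAM_PF2VF_READ(adev, header.version, &version);
	if (version == 2)
		/* v2 adds a checksum and GIM feature flags */
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
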
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0877ff9..e73d152 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	entry->tv.shared = true;
+	/* One for the VM updates, one for TTM and one for the CS job */
+	entry->tv.num_shared = 3;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
-	r = reservation_object_reserve_shared(bo->tbo.resv);
-	if (r)
-		return r;
-
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto error;
@@ -1844,10 +1841,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
-	if (r)
-		goto error_free;
-
 	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
 	if (r)
 		goto error_free;
@@ -3028,6 +3021,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
+	r = reservation_object_reserve_shared(root->tbo.resv, 1);
+	if (r)
+		goto error_unreserve;
+
 	r = amdgpu_vm_clear_bo(adev, vm, root,
 			       adev->vm_manager.root_level,
 			       vm->pte_support_ats);
@@ -3057,7 +3054,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	INIT_KFIFO(vm->faults);
-	vm->fault_credit = 16;
 
 	return 0;
 
@@ -3270,42 +3266,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 }
 
 /**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
-				  unsigned int pasid)
-{
-	struct amdgpu_vm *vm;
-
-	spin_lock(&adev->vm_manager.pasid_lock);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	if (!vm) {
-		/* VM not found, can't track fault credit */
-		spin_unlock(&adev->vm_manager.pasid_lock);
-		return true;
-	}
-
-	/* No lock needed. only accessed by IRQ handler */
-	if (!vm->fault_credit) {
-		/* Too many faults in this VM */
-		spin_unlock(&adev->vm_manager.pasid_lock);
-		return false;
-	}
-
-	vm->fault_credit--;
-	spin_unlock(&adev->vm_manager.pasid_lock);
-	return true;
-}
-
-/**
  * amdgpu_vm_manager_init - init the VM manager
  *
  * @adev: amdgpu_device pointer
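
The vm.c hunks track a reservation-object API change: reservation_object_reserve_shared() now takes a slot count, so amdgpu_vm_init() reserves the root PD's shared-fence slot once instead of re-reserving before every update, and ttm_validate_buffer's new num_shared field tells ttm_eu_reserve_buffers() how many slots to pre-reserve per BO. The signature change, in sketch form:

	/* old: always reserved exactly one shared slot */
	int reservation_object_reserve_shared(struct reservation_object *obj);

	/* new: the caller states how many fences it intends to add */
	int reservation_object_reserve_shared(struct reservation_object *obj,
					      unsigned int num_fences);
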
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d..e8dcfd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
 	/* Up to 128 pending retry page faults */
 	DECLARE_KFIFO(faults, u64, 128);
 
-	/* Limit non-retry fault storms */
-	unsigned int		fault_credit;
-
 	/* Points to the KFD process VM info */
 	struct amdkfd_process_info *process_info;
 
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
-				  unsigned int pasid);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 897afbb..0b263a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
  */
 #include <linux/list.h>
 #include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
 
 
 static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
 #define AMDGPU_MAX_XGMI_HIVE			8
 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4
 
-struct amdgpu_hive_info {
-	uint64_t		hive_id;
-	struct list_head	device_list;
-};
-
 static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
 static unsigned hive_count = 0;
 
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+	return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 {
 	int i;
 	struct amdgpu_hive_info *tmp;
@@ -58,62 +59,99 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 	tmp = &xgmi_hives[hive_count++];
 	tmp->hive_id = adev->gmc.xgmi.hive_id;
 	INIT_LIST_HEAD(&tmp->device_list);
+	mutex_init(&tmp->hive_lock);
+
 	return tmp;
 }
 
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+	int ret = -EINVAL;
+
+	/* Each psp needs to set the latest topology */
+	ret = psp_xgmi_set_topology_info(&adev->psp,
+					 hive->number_devices,
+					 &hive->topology_info);
+	if (ret)
+		dev_err(adev->dev,
+			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+			adev->gmc.xgmi.node_id,
+			adev->gmc.xgmi.hive_id, ret);
+	else
+		dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
+			 adev->gmc.xgmi.physical_node_id,
+				 adev->gmc.xgmi.hive_id);
+
+	return ret;
+}
+
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
-	struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+	struct psp_xgmi_topology_info *hive_topology;
 	struct amdgpu_hive_info *hive;
 	struct amdgpu_xgmi	*entry;
-	struct amdgpu_device 	*tmp_adev;
+	struct amdgpu_device *tmp_adev = NULL;
 
 	int count = 0, ret = -EINVAL;
 
-	if ((adev->asic_type < CHIP_VEGA20) ||
-		(adev->flags & AMD_IS_APU) )
+	if (!adev->gmc.xgmi.supported)
 		return 0;
-	adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
+
+	adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
 	adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
 
-	memset(&tmp_topology[0], 0, sizeof(tmp_topology));
 	mutex_lock(&xgmi_mutex);
 	hive = amdgpu_get_xgmi_hive(adev);
 	if (!hive)
 		goto exit;
 
+	hive_topology = &hive->topology_info;
+
 	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
 	list_for_each_entry(entry, &hive->device_list, head)
-		tmp_topology[count++].device_id = entry->device_id;
+		hive_topology->nodes[count++].node_id = entry->node_id;
+	hive->number_devices = count;
 
-	ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
-	if (ret) {
-		dev_err(adev->dev,
-			"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
-			adev->gmc.xgmi.device_id,
-			adev->gmc.xgmi.hive_id, ret);
-		goto exit;
-	}
-	/* Each psp need to set the latest topology */
+	/* Each psp needs to get the latest topology */
 	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
-		ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
+		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
 		if (ret) {
 			dev_err(tmp_adev->dev,
-				"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
-				tmp_adev->gmc.xgmi.device_id,
+				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+				tmp_adev->gmc.xgmi.node_id,
 				tmp_adev->gmc.xgmi.hive_id, ret);
-			/* To do : continue with some  node failed or disable the  whole  hive */
+			/* TODO: continue if a node fails, or disable the whole hive */
 			break;
 		}
 	}
-	if (!ret)
-		dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
-			adev->gmc.xgmi.physical_node_id,
-			adev->gmc.xgmi.hive_id);
+
+	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+		if (ret)
+			break;
+	}
 
 exit:
 	mutex_unlock(&xgmi_mutex);
 	return ret;
 }
 
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_hive_info *hive;
 
+	if (!adev->gmc.xgmi.supported)
+		return;
+
+	mutex_lock(&xgmi_mutex);
+
+	hive = amdgpu_get_xgmi_hive(adev);
+	if (!hive)
+		goto exit;
+
+	if (!(hive->number_devices--))
+		mutex_destroy(&hive->hive_lock);
+
+exit:
+	mutex_unlock(&xgmi_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 0000000..6151eb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+	uint64_t		hive_id;
+	struct list_head	device_list;
+	struct psp_xgmi_topology_info	topology_info;
+	int number_devices;
+	struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
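
With the hive bookkeeping exported through the new header, a device is expected to join its hive once at init and leave at teardown, with amdgpu_xgmi_update_topology() available for refreshes after a reset. A sketch of the call sites (the real ones sit in the device init/fini paths):

	/* bring-up: find or create the hive and publish topology */
	r = amdgpu_xgmi_add_device(adev);
	if (r)
		return r;

	/* teardown: drop out; the last device destroys the hive lock */
	amdgpu_xgmi_remove_device(adev);
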
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 79220a9..86e14c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
 	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
 	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		if (enable) {
 			ret = ci_program_pt_config_registers(adev, didt_config_ci);
 			if (ret) {
-				adev->gfx.rlc.funcs->exit_safe_mode(adev);
+				amdgpu_gfx_rlc_exit_safe_mode(adev);
 				return ret;
 			}
 		}
 
 		ci_do_enable_didt(adev, enable);
 
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
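
ci_dpm.c switches from calling the per-ASIC enter/exit hooks through the funcs table to the new common amdgpu_gfx_rlc_enter/exit_safe_mode() wrappers, which centralize bookkeeping the backends used to duplicate around the is_rlc_enabled/set_safe_mode hooks seen later in this diff. Roughly (a sketch, not the verbatim helper):

	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		/* nothing to do if the RLC is not running */
		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
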
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f5..71c50d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.flush_hdp = &cik_flush_hdp,
 	.invalidate_hdp = &cik_invalidate_hdp,
 	.need_full_reset = &cik_need_full_reset,
+	.init_doorbell_index = &legacy_doorbell_index_init,
 };
 
 static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f1..54c625a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
 
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
 #endif
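
cik.c and cik.h wire the new init_doorbell_index asic callback to a shared legacy_doorbell_index_init() for the pre-Vega doorbell layout. The core presumably invokes the callback once during early device init, before any ring claims a doorbell, along the lines of:

	adev->asic_funcs->init_doorbell_index(adev);
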
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6..8a8b496 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
  * [127:96] - reserved
  */
 
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	u32 ring_index = adev->irq.ih.rptr >> 2;
-	u16 pasid;
-
-	switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-	case 146:
-	case 147:
-		pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-		if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-			return true;
-		break;
-	default:
-		/* Not a VM fault */
-		return true;
-	}
-
-	adev->irq.ih.rptr += 16;
-	return false;
-}
-
  /**
  * cik_ih_decode_iv - decode an interrupt vector
  *
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cik_ih_funcs = {
 	.get_wptr = cik_ih_get_wptr,
-	.prescreen_iv = cik_ih_prescreen_iv,
 	.decode_iv = cik_ih_decode_iv,
 	.set_rptr = cik_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b918c88..4579519 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 extra_bits = vmid & 0xf;
 
 	/* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	cik_sdma_enable(adev, true);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
+
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *source,
 					     struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
 
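
The IRQ-handler change at the end of cik_sdma.c is part of this cycle's scheduler fault-handling rework: instead of queuing a device-global reset work item, the handler works out which SDMA instance faulted and calls drm_sched_fault() on that ring's scheduler, which fires the scheduler's timeout handling immediately and lets recovery run per ring. The same substitution appears in the GFX priv_reg/priv_inst handlers below. Schematically:

	/* route the fault to the scheduler that owns the offending ring */
	instance_id = entry->ring_id & 0x3;
	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
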
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d..9d3ea29 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
 }
 
 /**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	u32 ring_index = adev->irq.ih.rptr >> 2;
-	u16 pasid;
-
-	switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-	case 146:
-	case 147:
-		pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-		if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-			return true;
-		break;
-	default:
-		/* Not a VM fault */
-		return true;
-	}
-
-	adev->irq.ih.rptr += 16;
-	return false;
-}
-
-/**
  * cz_ih_decode_iv - decode an interrupt vector
  *
  * @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cz_ih_funcs = {
 	.get_wptr = cz_ih_get_wptr,
-	.prescreen_iv = cz_ih_prescreen_iv,
 	.decode_iv = cz_ih_decode_iv,
 	.set_rptr = cz_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d76eb27..1dc3013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 				      CP_ME_CNTL__CE_HALT_MASK));
 		WREG32(mmSCRATCH_UMSK, 0);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the rings */
 	gfx_v6_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
 	WREG32(mmCP_RB2_CNTL, tmp);
 	WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-	adev->gfx.compute_ring[0].ready = false;
-	adev->gfx.compute_ring[1].ready = false;
 
 	for (i = 0; i < 2; i++) {
-		r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+		r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
 		if (r)
 			return r;
-		adev->gfx.compute_ring[i].ready = true;
 	}
 
 	return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, val);
 }
 
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
 	const u32 *src_ptr;
 	volatile u32 *dst_ptr;
-	u32 dws, i;
+	u32 dws;
 	u64 reg_list_mc_addr;
 	const struct cs_section_def *cs_data;
 	int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (src_ptr) {
-		/* save restore block */
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.save_restore_obj,
-					      &adev->gfx.rlc.save_restore_gpu_addr,
-					      (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-				 r);
-			gfx_v6_0_rlc_fini(adev);
+		/* init save restore block */
+		r = amdgpu_gfx_rlc_init_sr(adev, dws);
+		if (r)
 			return r;
-		}
-
-		/* write the sr buffer */
-		dst_ptr = adev->gfx.rlc.sr_ptr;
-		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
 	}
 
 	if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 					      (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v6_0_rlc_fini(adev);
+			amdgpu_gfx_rlc_fini(adev);
 			return r;
 		}
 
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
 	if (!adev->gfx.rlc_fw)
 		return -EINVAL;
 
-	gfx_v6_0_rlc_stop(adev);
-	gfx_v6_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->stop(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 	gfx_v6_0_init_pg(adev);
 	gfx_v6_0_init_cg(adev);
 
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
 	WREG32(mmRLC_UCODE_ADDR, 0);
 
 	gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
-	gfx_v6_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
 };
 
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+	.init = gfx_v6_0_rlc_init,
+	.resume = gfx_v6_0_rlc_resume,
+	.stop = gfx_v6_0_rlc_stop,
+	.reset = gfx_v6_0_rlc_reset,
+	.start = gfx_v6_0_rlc_start
+};
+
 static int gfx_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
 	adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
 	adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
 	adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+	adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
 	gfx_v6_0_set_ring_funcs(adev);
 	gfx_v6_0_set_irq_funcs(adev);
 
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v6_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	gfx_v6_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 
 	return 0;
 }
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
 
 	gfx_v6_0_constants_init(adev);
 
-	r = gfx_v6_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfx_v6_0_cp_enable(adev, false);
-	gfx_v6_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 	gfx_v6_0_fini_pg(adev);
 
 	return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_ring *ring;
+
+	switch (entry->ring_id) {
+	case 0:
+		ring = &adev->gfx.gfx_ring[0];
+		break;
+	case 1:
+	case 2:
+		ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+		break;
+	default:
+		return;
+	}
+	drm_sched_fault(&ring->sched);
+}
+
 static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v6_0_fault(adev, entry);
 	return 0;
 }
 
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v6_0_fault(adev, entry);
 	return 0;
 }
 
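
gfx_v6_0.c also completes the ring->ready to ring->sched.ready migration: amdgpu_ring_test_helper() now owns the flag, marking the scheduler ready only if the ring test passes, so job submission is gated on the test result. The helper amounts to (sketch):

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r = amdgpu_ring_test_ring(ring);

		ring->sched.ready = !r;
		return r;
	}
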
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0e72bc0..3a9fb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
 
 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				      struct amdgpu_ib *ib,
-				      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  unsigned vmid, bool ctx_switch)
+					  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v7_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 		* GFX7_MEC_HPD_SIZE * 2;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	return 0;
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
  * The RLC is a multi-purpose microengine that handles a
  * variety of functions.
  */
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 {
 	const u32 *src_ptr;
-	volatile u32 *dst_ptr;
-	u32 dws, i;
+	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (src_ptr) {
-		/* save restore block */
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.save_restore_obj,
-					      &adev->gfx.rlc.save_restore_gpu_addr,
-					      (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		/* init save restore block */
+		r = amdgpu_gfx_rlc_init_sr(adev, dws);
+		if (r)
 			return r;
-		}
-
-		/* write the sr buffer */
-		dst_ptr = adev->gfx.rlc.sr_ptr;
-		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
 	}
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if (adev->gfx.rlc.cp_table_size) {
-
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		gfx_v7_0_init_cp_pg_table(adev);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 	}
 
 	return 0;
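
The save/restore, clear-state and cp-table buffers are now allocated by
shared helpers instead of per-ASIC copies. A sketch of the clear-state
helper, assuming it mirrors the code deleted above (amdgpu_gfx_rlc_init_csb
is added in amdgpu_rlc.c, outside this section):

	int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
	{
		volatile u32 *dst_ptr;
		u32 dws;
		int r;

		/* allocate the clear-state BO, sized by the per-ASIC hook */
		adev->gfx.rlc.clear_state_size = dws =
			adev->gfx.rlc.funcs->get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
		}

		/* fill the buffer via the per-ASIC callback, then unmap */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

		return 0;
	}
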
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
 	return orig;
 }
 
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+	return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
 {
 	u32 tmp, i, mask;
 
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	adev->gfx.rlc_feature_version = le32_to_cpu(
 					hdr->ucode_feature_version);
 
-	gfx_v7_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	/* disable CG */
 	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
 	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 
-	gfx_v7_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 
 	gfx_v7_0_init_pg(adev);
 
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	if (adev->asic_type == CHIP_BONAIRE)
 		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
 
-	gfx_v7_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
 		WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 4;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
 	if (adev->asic_type == CHIP_KAVERI)
-		max_me = 5;
-
-	if (adev->gfx.rlc.cp_table_ptr == NULL)
-		return;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
+		return 5;
+	else
+		return 4;
 }
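
The three near-identical CP jump-table writers (gfx v7/v8/v9) collapse
into one shared loop; each ASIC now only reports how many microengines
carry a jump table. A condensed sketch of the shared copy, assuming it
keeps the per-ME logic deleted here (the real code is in amdgpu_rlc.c),
with the five firmware images table-driven instead of open-coded:

	const struct firmware *fw[] = {
		adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
		adev->gfx.mec_fw, adev->gfx.mec2_fw,
	};
	int max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
	volatile u32 *dst_ptr = adev->gfx.rlc.cp_table_ptr;
	u32 bo_offset = 0;
	int me, i;

	for (me = 0; me < max_me; me++) {
		const struct gfx_firmware_header_v1_0 *hdr =
			(const struct gfx_firmware_header_v1_0 *)fw[me]->data;
		const __le32 *fw_data = (const __le32 *)(fw[me]->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		u32 table_offset = le32_to_cpu(hdr->jt_offset);
		u32 table_size = le32_to_cpu(hdr->jt_size);

		/* copy each ME's jump table into the shared BO back to back */
		for (i = 0; i < table_size; i++)
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		bo_offset += table_size;
	}
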
 
 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
-	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
-	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v7_0_set_safe_mode,
+	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
+	.init = gfx_v7_0_rlc_init,
+	.get_csb_size = gfx_v7_0_get_csb_size,
+	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+	.resume = gfx_v7_0_rlc_resume,
+	.stop = gfx_v7_0_rlc_stop,
+	.reset = gfx_v7_0_rlc_reset,
+	.start = gfx_v7_0_rlc_start
 };
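
The two-entry enter/exit vtable grows into a full RLC interface, and the
cg_flags/in_safe_mode bookkeeping that each ASIC used to duplicate moves
into shared wrappers. A sketch of the presumed enter wrapper, modelled
on the per-ASIC code this series deletes (the exit path mirrors it with
unset_safe_mode):

	void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		if (adev->gfx.rlc.in_safe_mode)
			return;

		/* do nothing if the RLC is not running */
		if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
			return;

		if (adev->cg_flags &
		    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
		     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
			adev->gfx.rlc.funcs->set_safe_mode(adev);
			adev->gfx.rlc.in_safe_mode = true;
		}
	}

gfx v7 simply returns true from is_rlc_enabled, so safe-mode entry is
never skipped on CIK.
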
 
 static int gfx_v7_0_early_init(void *handle)
@@ -4477,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v7_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	gfx_v7_0_cp_compute_fini(adev);
-	gfx_v7_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
 				&adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle)
 	gfx_v7_0_constants_init(adev);
 
 	/* init rlc */
-	r = gfx_v7_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle)
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 	gfx_v7_0_cp_enable(adev, false);
-	gfx_v7_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 	gfx_v7_0_fini_pg(adev);
 
 	return 0;
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle)
 		gfx_v7_0_update_cg(adev, false);
 
 		/* stop the rlc */
-		gfx_v7_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 
 		/* Disable GFX parsing/prefetching */
 		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_ring *ring;
+	u8 me_id, pipe_id;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if ((ring->me == me_id) && (ring->pipe == pipe_id))
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
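
Instead of scheduling the global reset work, privileged-operation faults
are now routed to the DRM scheduler of the exact ring that raised them,
tying into this series' new timeout/fault handling. The ring_id bit
layout used by the handlers decodes as below; a standalone demo with the
field positions taken from the masks above:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ring_id = 0x26;               /* example IV value */
		uint8_t me    = (ring_id & 0x0c) >> 2; /* microengine     */
		uint8_t pipe  = (ring_id & 0x03) >> 0; /* pipe within ME  */
		uint8_t queue = (ring_id & 0x70) >> 4; /* queue (v8/v9)   */

		printf("me=%u pipe=%u queue=%u\n", me, pipe, queue);
		return 0;
	}
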
 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v7_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
 	// XXX soft reset the gfx block only
-	schedule_work(&adev->reset_work);
+	gfx_v7_0_fault(adev, entry);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8..381f593b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
 #include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_enum.h"
 #include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
 
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
@@ -54,7 +53,7 @@
 #include "ivsrcid/ivsrcid_vislands30.h"
 
 #define GFX8_NUM_GFX_RINGS     1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
 
 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
@@ -839,18 +838,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +857,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -886,19 +878,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 16, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 	ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +901,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 
 	tmp = adev->wb.wb[index];
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+	else
 		r = -EINVAL;
-	}
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1282,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
 	buffer[count++] = cpu_to_le32(0);
 }
 
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 4;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
 	if (adev->asic_type == CHIP_CARRIZO)
-		max_me = 5;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else  if (me == 4) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+		return 5;
+	else
+		return 4;
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
-	volatile u32 *dst_ptr;
-	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -1381,44 +1300,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			gfx_v8_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if ((adev->asic_type == CHIP_CARRIZO) ||
 	    (adev->asic_type == CHIP_STONEY)) {
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		cz_init_cp_jump_table(adev);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
 	}
 
 	return 0;
@@ -1443,7 +1336,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -1629,7 +1522,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		return 0;
 
 	/* bail if the compute ring is not ready */
-	if (!ring->ready)
+	if (!ring->sched.ready)
 		return 0;
 
 	tmp = RREG32(mmGB_EDC_MODE);
@@ -1997,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX8_MEC_HPD_SIZE);
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2088,7 +1981,7 @@ static int gfx_v8_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v8_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -2108,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
 		/* no gfx doorbells on iceland */
 		if (adev->asic_type != CHIP_TOPAZ) {
 			ring->use_doorbell = true;
-			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
 		}
 
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -2181,7 +2074,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v8_0_mec_fini(adev);
-	gfx_v8_0_rlc_fini(adev);
+	amdgpu_gfx_rlc_fini(adev);
 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
 				&adev->gfx.rlc.clear_state_gpu_addr,
 				(void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4068,15 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
-	gfx_v8_0_rlc_stop(adev);
-	gfx_v8_0_rlc_reset(adev);
+	if (amdgpu_sriov_vf(adev)) {
+		gfx_v8_0_init_csb(adev);
+		return 0;
+	}
+
+	adev->gfx.rlc.funcs->stop(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 	gfx_v8_0_init_pg(adev);
-	gfx_v8_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -4197,7 +4095,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32(mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -4322,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
 
 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
 					DOORBELL_RANGE_LOWER,
-					AMDGPU_DOORBELL_GFX_RING0);
+					adev->doorbell_index.gfx_ring0);
 	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
 
 	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4379,10 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	/* start the ring */
 	amdgpu_ring_clear_ring(ring);
 	gfx_v8_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		ring->ready = false;
+	ring->sched.ready = true;
+	r = amdgpu_ring_test_helper(ring);
 
 	return r;
 }
@@ -4396,8 +4292,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -4473,11 +4369,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 	return r;
 }
 
@@ -4755,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
 {
 	if (adev->asic_type > CHIP_TONGA) {
-		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
-		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
 	}
 	/* enable doorbells */
 	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4781,7 +4675,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -4820,10 +4714,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	 */
 	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
 	}
 
 done:
@@ -4867,7 +4758,7 @@ static int gfx_v8_0_hw_init(void *handle)
 	gfx_v8_0_init_golden_registers(adev);
 	gfx_v8_0_constants_init(adev);
 
-	r = gfx_v8_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -4899,7 +4790,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -4973,16 +4864,16 @@ static int gfx_v8_0_hw_fini(void *handle)
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
 	}
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 	if (!gfx_v8_0_wait_for_idle(adev))
 		gfx_v8_0_cp_enable(adev, false);
 	else
 		pr_err("cp is busy, skip halt cp\n");
 	if (!gfx_v8_0_wait_for_rlc_idle(adev))
-		gfx_v8_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 	else
 		pr_err("rlc is busy, skip halt rlc\n");
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5061,17 +4952,16 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
 static int gfx_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 grbm_soft_reset = 0;
 
 	if ((!adev->gfx.grbm_soft_reset) &&
 	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
-	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
 	/* stop the rlc */
-	gfx_v8_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5165,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
 static int gfx_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 grbm_soft_reset = 0;
 
 	if ((!adev->gfx.grbm_soft_reset) &&
 	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
-	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
 	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
@@ -5197,7 +5086,7 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		gfx_v8_0_cp_gfx_resume(adev);
 
-	gfx_v8_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
@@ -5445,7 +5334,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 				AMD_PG_SUPPORT_RLC_SMU_HS |
 				AMD_PG_SUPPORT_CP |
 				AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5499,7 +5388,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 				AMD_PG_SUPPORT_RLC_SMU_HS |
 				AMD_PG_SUPPORT_CP |
 				AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5593,57 +5482,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-	u32 data;
+	uint32_t rlc_setting;
+
+	rlc_setting = RREG32(mmRLC_CNTL);
+	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+		return false;
+
+	return true;
+}
+
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+	uint32_t data;
 	unsigned i;
-
 	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+	WREG32(mmRLC_SAFE_MODE, data);
 
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		data |= RLC_SAFE_MODE__CMD_MASK;
-		data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-		WREG32(mmRLC_SAFE_MODE, data);
-
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_GPM_STAT) &
-			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
-			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
-				break;
-			udelay(1);
-		}
-
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-				break;
-			udelay(1);
-		}
-		adev->gfx.rlc.in_safe_mode = true;
+	/* wait for RLC_SAFE_MODE */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if ((RREG32(mmRLC_GPM_STAT) &
+		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			break;
+		udelay(1);
 	}
 }
 
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-	u32 data = 0;
+	uint32_t data;
 	unsigned i;
 
 	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
-
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->gfx.rlc.in_safe_mode) {
-			data |= RLC_SAFE_MODE__CMD_MASK;
-			data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-			WREG32(mmRLC_SAFE_MODE, data);
-			adev->gfx.rlc.in_safe_mode = false;
-		}
-	}
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	WREG32(mmRLC_SAFE_MODE, data);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5538,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-	.enter_safe_mode = iceland_enter_rlc_safe_mode,
-	.exit_safe_mode = iceland_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v8_0_set_safe_mode,
+	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
+	.init = gfx_v8_0_rlc_init,
+	.get_csb_size = gfx_v8_0_get_csb_size,
+	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+	.resume = gfx_v8_0_rlc_resume,
+	.stop = gfx_v8_0_rlc_stop,
+	.reset = gfx_v8_0_rlc_reset,
+	.start = gfx_v8_0_rlc_start
 };
 
 static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5556,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t temp, data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5652,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5662,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5745,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	gfx_v8_0_wait_for_rlc_serdes(adev);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 					    bool enable)
@@ -6131,9 +6025,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				      struct amdgpu_ib *ib,
-				      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,9 +6057,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+					  struct amdgpu_job *job,
 					  struct amdgpu_ib *ib,
-					  unsigned vmid, bool ctx_switch)
+					  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
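
The emit_ib callbacks now take the scheduler job instead of a raw vmid.
Direct submissions (ring and IB tests) have no job, so the vmid is
pulled through a NULL-tolerant accessor; assumed definition, matching
how these call sites use it:

	/* from amdgpu_job.h (assumed): direct submissions pass job == NULL
	 * and get vmid 0 */
	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
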
@@ -6738,12 +6636,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
 static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v8_0_fault(adev, entry);
 	return 0;
 }
 
@@ -6752,7 +6677,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v8_0_fault(adev, entry);
 	return 0;
 }
 
@@ -6976,10 +6901,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
 		17 + /* gfx_v8_0_ring_emit_vm_flush */
 		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
 	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_compute */
-	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
 	.test_ring = gfx_v8_0_ring_test_ring,
-	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_rreg = gfx_v8_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 21363b2..7556716 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,7 @@
 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
 
 #define GFX9_NUM_GFX_RINGS     1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
 
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
 
 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -396,18 +397,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+	if (r)
 		return r;
-	}
+
 	WREG32(scratch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
+	if (r)
+		goto error_free_scratch;
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +416,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 			break;
 		DRM_UDELAY(1);
 	}
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+error_free_scratch:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
@@ -443,19 +437,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 16, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err1;
-	}
+
 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 	ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +460,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-			DRM_ERROR("amdgpu: IB test timed out.\n");
-			r = -ETIMEDOUT;
-			goto err2;
+		r = -ETIMEDOUT;
+		goto err2;
 	} else if (r < 0) {
-			DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-			goto err2;
+		goto err2;
 	}
 
 	tmp = adev->wb.wb[index];
-	if (tmp == 0xDEADBEEF) {
-			DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
-			r = 0;
-	} else {
-			DRM_ERROR("ib test on ring %d failed\n", ring->idx);
-			r = -EINVAL;
-	}
+	if (tmp == 0xDEADBEEF)
+		r = 0;
+	else
+		r = -EINVAL;
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -660,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+	/*
+	 * For Picasso on AM4 socket boards, use picasso_rlc_am4.bin
+	 * instead of picasso_rlc.bin.
+	 * Detection: PCO AM4 has revision >= 0xC8 && revision <= 0xCF
+	 *            or revision >= 0xD8 && revision <= 0xDF;
+	 * otherwise it is PCO FP5.
+	 */
+	if (!strcmp(chip_name, "picasso") &&
+		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+	else
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 	if (err)
 		goto out;
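
The PCI revision test above is easy to sanity-check in isolation; a
small standalone version of the same predicate:

	#include <stdbool.h>
	#include <stdio.h>

	/* same ranges as the hunk above: AM4 Picasso parts sit in
	 * 0xC8-0xCF and 0xD8-0xDF, everything else is FP5 */
	static bool picasso_is_am4(unsigned int rev)
	{
		return (rev >= 0xC8 && rev <= 0xCF) ||
		       (rev >= 0xD8 && rev <= 0xDF);
	}

	int main(void)
	{
		printf("0xC9 -> %d, 0xD0 -> %d\n",
		       picasso_is_am4(0xC9), picasso_is_am4(0xD0)); /* 1, 0 */
		return 0;
	}
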
@@ -1065,85 +1064,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
 }
 
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 5;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else  if (me == 4) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
-	/* clear state block */
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
-			&adev->gfx.rlc.clear_state_gpu_addr,
-			(void **)&adev->gfx.rlc.cs_ptr);
-
-	/* jump table block */
-	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
-			&adev->gfx.rlc.cp_table_gpu_addr,
-			(void **)&adev->gfx.rlc.cp_table_ptr);
+	return 5;
 }
 
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
-	volatile u32 *dst_ptr;
-	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
@@ -1152,45 +1079,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
-				r);
-			gfx_v9_0_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if (adev->asic_type == CHIP_RAVEN) {
 		/* TODO: double check the cp_table_size for RV */
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_err(adev->dev,
-				"(%d) failed to create cp table bo\n", r);
-			gfx_v9_0_rlc_fini(adev);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		rv_init_cp_jump_table(adev);
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
 	}
 
 	switch (adev->asic_type) {
@@ -1264,7 +1164,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -1635,8 +1535,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
 	/* Clear GDS reserved memory */
 	r = amdgpu_ring_alloc(ring, 17);
 	if (r) {
-		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+		DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+			  ring->name, r);
 		return r;
 	}
 
@@ -1680,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	ring->ring_obj = NULL;
 	ring->use_doorbell = true;
-	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
 				+ (ring_id * GFX9_MEC_HPD_SIZE);
 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
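
Doorbell indices stop being global AMDGPU_DOORBELL* constants and come
from a per-ASIC adev->doorbell_index table instead. On SOC15 parts the
table entries appear to be in 64-bit units while the CP is programmed
with dword offsets, hence the << 1 above; sketch under that assumption:

	/* qword-granular doorbell slot -> dword offset handed to the CP */
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
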
@@ -1748,7 +1648,7 @@ static int gfx_v9_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = gfx_v9_0_rlc_init(adev);
+	r = adev->gfx.rlc.funcs->init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init rlc BOs!\n");
 		return r;
@@ -1769,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle)
 		else
 			sprintf(ring->name, "gfx_%d", i);
 		ring->use_doorbell = true;
-		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 		r = amdgpu_ring_init(adev, ring, 1024,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
@@ -2499,12 +2399,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	gfx_v9_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	/* disable CG */
 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-	gfx_v9_0_rlc_reset(adev);
+	adev->gfx.rlc.funcs->reset(adev);
 
 	gfx_v9_0_init_pg(adev);
 
@@ -2515,15 +2415,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 			return r;
 	}
 
-	if (adev->asic_type == CHIP_RAVEN ||
-	    adev->asic_type == CHIP_VEGA20) {
-		if (amdgpu_lbpw != 0)
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		if (amdgpu_lbpw == 0)
+			gfx_v9_0_enable_lbpw(adev, false);
+		else
+			gfx_v9_0_enable_lbpw(adev, true);
+		break;
+	case CHIP_VEGA20:
+		if (amdgpu_lbpw > 0)
 			gfx_v9_0_enable_lbpw(adev, true);
 		else
 			gfx_v9_0_enable_lbpw(adev, false);
+		break;
+	default:
+		break;
 	}
 
-	gfx_v9_0_rlc_start(adev);
+	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
 }
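
The RAVEN and VEGA20 branches above treat the amdgpu_lbpw module
parameter differently around its assumed default of -1 (auto): RAVEN
enables load-balance-per-watt unless it is explicitly 0, VEGA20 only
when it is explicitly positive. Equivalent predicate, under that
assumption about the default:

	/* amdgpu_lbpw: 1 = on, 0 = off, -1 = auto (assumed) */
	static bool lbpw_should_enable(int param, bool is_raven)
	{
		if (is_raven)
			return param != 0;  /* auto -> enabled  */
		return param > 0;           /* auto -> disabled */
	}
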
@@ -2538,7 +2447,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
 	if (!enable) {
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -2728,7 +2637,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v9_0_cp_gfx_start(adev);
-	ring->ready = true;
+	ring->sched.ready = true;
 
 	return 0;
 }
@@ -2743,8 +2652,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2867,11 +2776,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 
 	return r;
 }
@@ -3089,9 +2996,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
 	/* enable the doorbell if requested */
 	if (ring->use_doorbell) {
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
-					(AMDGPU_DOORBELL64_KIQ *2) << 2);
+					(adev->doorbell_index.kiq * 2) << 2);
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-					(AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+					(adev->doorbell_index.userqueue_end * 2) << 2);
 	}
 
 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -3250,7 +3157,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -3315,19 +3222,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 		return r;
 
 	ring = &adev->gfx.gfx_ring[0];
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3354,7 +3255,7 @@ static int gfx_v9_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	r = gfx_v9_0_rlc_resume(adev);
+	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;
 
@@ -3392,7 +3293,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -3434,7 +3335,7 @@ static int gfx_v9_0_hw_fini(void *handle)
 	}
 
 	gfx_v9_0_cp_enable(adev, false);
-	gfx_v9_0_rlc_stop(adev);
+	adev->gfx.rlc.funcs->stop(adev);
 
 	gfx_v9_0_csb_vram_unpin(adev);
 
@@ -3509,7 +3410,7 @@ static int gfx_v9_0_soft_reset(void *handle)
 
 	if (grbm_soft_reset) {
 		/* stop the rlc */
-		gfx_v9_0_rlc_stop(adev);
+		adev->gfx.rlc.funcs->stop(adev);
 
 		/* Disable GFX parsing/prefetching */
 		gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3608,64 +3509,47 @@ static int gfx_v9_0_late_init(void *handle)
 	return 0;
 }
 
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-	uint32_t rlc_setting, data;
-	unsigned i;
-
-	if (adev->gfx.rlc.in_safe_mode)
-		return;
+	uint32_t rlc_setting;
 
 	/* if RLC is not enabled, do nothing */
 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
+		return false;
 
-	if (adev->cg_flags &
-	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
-	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
-		data = RLC_SAFE_MODE__CMD_MASK;
-		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+	return true;
+}
 
-		/* wait for RLC_SAFE_MODE */
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-				break;
-			udelay(1);
-		}
-		adev->gfx.rlc.in_safe_mode = true;
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+{
+	uint32_t data;
+	unsigned i;
+
+	data = RLC_SAFE_MODE__CMD_MASK;
+	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+
+	/* wait for RLC_SAFE_MODE */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			break;
+		udelay(1);
 	}
 }
 
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-	uint32_t rlc_setting, data;
+	uint32_t data;
 
-	if (!adev->gfx.rlc.in_safe_mode)
-		return;
-
-	/* if RLC is not enabled, do nothing */
-	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
-
-	if (adev->cg_flags &
-	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		/*
-		 * Try to exit safe mode only if it is already in safe
-		 * mode.
-		 */
-		data = RLC_SAFE_MODE__CMD_MASK;
-		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
-		adev->gfx.rlc.in_safe_mode = false;
-	}
+	data = RLC_SAFE_MODE__CMD_MASK;
+	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
 }
 
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	gfx_v9_0_enter_rlc_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3676,7 +3560,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
 	}
 
-	gfx_v9_0_exit_rlc_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3774,7 +3658,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 {
 	uint32_t data, def;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	/* Enable 3D CGCG/CGLS */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3814,7 +3698,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3822,7 +3706,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t def, data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3862,7 +3746,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3891,8 +3775,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
-	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
-	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v9_0_set_safe_mode,
+	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
+	.init = gfx_v9_0_rlc_init,
+	.get_csb_size = gfx_v9_0_get_csb_size,
+	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+	.resume = gfx_v9_0_rlc_resume,
+	.stop = gfx_v9_0_rlc_stop,
+	.reset = gfx_v9_0_rlc_reset,
+	.start = gfx_v9_0_rlc_start
 };
 
 static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4073,9 +3966,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                      struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4104,20 +3999,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                          struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+					  struct amdgpu_job *job,
+					  struct amdgpu_ib *ib,
+					  bool ctx_switch)
 {
-        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
-        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
-        amdgpu_ring_write(ring,
+	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-                                (2 << 0) |
+				(2 << 0) |
 #endif
-                                lower_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, control);
+				lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, control);
 }
 
 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -4696,12 +4593,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+			   struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+
+	switch (me_id) {
+	case 0:
+		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+		break;
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
 				 struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal register access in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v9_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4710,7 +4634,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 				  struct amdgpu_iv_entry *entry)
 {
 	DRM_ERROR("Illegal instruction in command stream\n");
-	schedule_work(&adev->reset_work);
+	gfx_v9_0_fault(adev, entry);
 	return 0;
 }
 
@@ -4837,10 +4761,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
-	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
 	.test_ring = gfx_v9_0_ring_test_ring,
-	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bfa317a..f5edddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
 	return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
 }
 
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
 {
-	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* stride, in registers, from mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-		     lower_32_bits(value));
+	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+				offset * vmid, lower_32_bits(page_table_base));
 
-	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-		     upper_32_bits(value));
+	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+				offset * vmid, upper_32_bits(page_table_base));
 }
 
 static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	gfxhub_v1_0_init_gart_pt_regs(adev);
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
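
The setup_vm_pt_regs() generalization relies on the VM context registers forming a regular array: the per-VMID offset is simply the distance between the CONTEXT0 and CONTEXT1 base-address registers. A small illustration of the addressing, with hypothetical names:

/* Hypothetical helper showing the layout assumption: context N's
 * register sits at context 0's register plus N times the stride
 * (a dword register offset, not a byte address).
 */
static u32 vm_ctx_reg(u32 ctx0_reg, u32 ctx1_reg, u32 vmid)
{
	u32 stride = ctx1_reg - ctx0_reg;	/* registers per VM context */

	return ctx0_reg + stride * vmid;
}
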
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 206e29c..92d3a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
 u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 73ad02a..9fc3296 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -359,7 +359,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				uint32_t vmid, uint32_t flush_type)
 {
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
@@ -581,7 +582,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v6_0_set_fault_enable_default(adev, true);
 
-	gmc_v6_0_flush_gpu_tlb(adev, 0);
+	gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
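
The flush_gpu_tlb callback gains a flush_type argument across all GMC generations; the pre-Vega parts accept it but ignore it, and existing callers simply pass 0 for the legacy flush. A sketch of the calling convention, assuming the usual gmc_funcs indirection:

/* Sketch: 0 selects the legacy flush everywhere; newer hardware can
 * request other invalidation types through the same callback.
 */
static void flush_gart_tlb(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0 /* vmid */,
					   0 /* flush_type: legacy */);
}
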
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce..761dcfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				uint32_t vmid, uint32_t flush_type)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 		WREG32(mmCHUB_CONTROL, tmp);
 	}
 
-	gmc_v7_0_flush_gpu_tlb(adev, 0);
+	gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 747c068..1ad7e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -633,7 +633,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  * Flush the TLB for the requested page table (CIK).
  */
 static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+				uint32_t vmid, uint32_t flush_type)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -942,7 +942,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
 
-	gmc_v8_0_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f35d7a5..ce150de 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: interrupt vector entry to prescreen
+ * @addr: decoded fault address
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+				  struct amdgpu_iv_entry *entry,
+				  uint64_t addr)
+{
+	struct amdgpu_vm *vm;
+	u64 key;
+	int r;
+
+	/* No PASID, can't identify faulting process */
+	if (!entry->pasid)
+		return true;
+
+	/* Not a retry fault */
+	if (!(entry->src_data[1] & 0x80))
+		return true;
+
+	/* Track retry faults in per-VM fault FIFO. */
+	spin_lock(&adev->vm_manager.pasid_lock);
+	vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+	if (!vm) {
+		/* VM not found, process it normally */
+		spin_unlock(&adev->vm_manager.pasid_lock);
+		return true;
+	}
+
+	key = AMDGPU_VM_FAULT(entry->pasid, addr);
+	r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+	/* Hash table is full or the fault is already being processed,
+	 * ignore further page faults
+	 */
+	if (r != 0) {
+		spin_unlock(&adev->vm_manager.pasid_lock);
+		return false;
+	}
+	/* No locking required with single writer and single reader */
+	r = kfifo_put(&vm->faults, key);
+	if (!r) {
+		/* FIFO is full. Ignore it until there is space */
+		amdgpu_vm_clear_fault(vm->fault_hash, key);
+		spin_unlock(&adev->vm_manager.pasid_lock);
+		return false;
+	}
+
+	spin_unlock(&adev->vm_manager.pasid_lock);
+	/* It's the first fault for this address, process it normally */
+	return true;
+}
+
 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 				struct amdgpu_irq_src *source,
 				struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 	addr = (u64)entry->src_data[0] << 12;
 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 
+	if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+		return 1; /* This also prevents sending it to KFD */
+
 	if (!amdgpu_sriov_vf(adev)) {
 		status = RREG32(hub->vm_l2_pro_fault_status);
 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -293,14 +352,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+					uint32_t flush_type)
 {
 	u32 req = 0;
 
-	/* invalidate using legacy mode on vmid*/
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -312,48 +371,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 	return req;
 }
 
-static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-						  uint32_t reg0, uint32_t reg1,
-						  uint32_t ref, uint32_t mask)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-					    ref, mask);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for IRQ context */
-	if (r < 1 && in_interrupt())
-		goto failed_kiq;
-
-	might_sleep();
-
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq;
-
-	return 0;
-
-failed_kiq:
-	pr_err("failed to invalidate tlb with kiq\n");
-	return r;
-}
-
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +379,50 @@ static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
  */
 
 /**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - flush the TLB using a given flush type
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
+ * @flush_type: type of TLB flush to perform
  *
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using the given flush type.
  */
 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+				uint32_t vmid, uint32_t flush_type)
 {
-	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned i, j;
-	int r;
 
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
-		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-		if (adev->gfx.kiq.ring.ready &&
+		/* This is necessary for a HW workaround under SRIOV as well
+		 * as GFXOFF under bare metal
+		 */
+		if (adev->gfx.kiq.ring.sched.ready &&
 		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
 		    !adev->in_gpu_reset) {
-			r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
-				hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
-			if (!r)
-				continue;
+			uint32_t req = hub->vm_inv_eng0_req + eng;
+			uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+							   1 << vmid);
+			continue;
 		}
 
 		spin_lock(&adev->gmc.invalidate_lock);
-
 		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-
-		/* Busy wait for ACK.*/
-		for (j = 0; j < 100; j++) {
-			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-			tmp &= 1 << vmid;
-			if (tmp)
-				break;
-			cpu_relax();
-		}
-		if (j < 100) {
-			spin_unlock(&adev->gmc.invalidate_lock);
-			continue;
-		}
-
-		/* Wait for ACK with a delay.*/
 		for (j = 0; j < adev->usec_timeout; j++) {
 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-			tmp &= 1 << vmid;
-			if (tmp)
+			if (tmp & (1 << vmid))
 				break;
 			udelay(1);
 		}
-		if (j < adev->usec_timeout) {
-			spin_unlock(&adev->gmc.invalidate_lock);
-			continue;
-		}
 		spin_unlock(&adev->gmc.invalidate_lock);
+		if (j < adev->usec_timeout)
+			continue;
+
 		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 	}
 }
@@ -429,7 +432,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -739,9 +742,8 @@ static int gmc_v9_0_late_init(void *handle)
 		unsigned vmhub = ring->funcs->vmhub;
 
 		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
-			 ring->idx, ring->name, ring->vm_inv_eng,
-			 ring->funcs->vmhub);
+		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
 	}
 
 	/* Engine 16 is used for KFD and 17 for GART flushes */
@@ -959,6 +961,9 @@ static int gmc_v9_0_sw_init(void *handle)
 	/* This interrupt is VMC page fault.*/
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
 				&adev->gmc.vm_fault);
+	if (r)
+		return r;
+
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
 				&adev->gmc.vm_fault);
 
@@ -991,7 +996,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	}
 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
 
-	if (adev->asic_type == CHIP_VEGA20) {
+	if (adev->gmc.xgmi.supported) {
 		r = gfxhub_v1_1_get_xgmi_info(adev);
 		if (r)
 			return r;
@@ -1122,7 +1127,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_flush_gpu_tlb(adev, 0);
+	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
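
The new gmc_v9_0_prescreen_iv() above moves retry-fault filtering out of the IH prescreen hook and into the GMC fault handler: each (PASID, address) pair is reduced to a 64-bit key, a per-VM hash set drops faults already in flight, and a kfifo queues the rest for processing. The core of that dedup as a sketch, with a hypothetical key packing (the real AMDGPU_VM_FAULT() layout lives in amdgpu_vm.h):

/* Hypothetical key packing: PASID in the top 16 bits, page address
 * below it.  The exact layout is an assumption for illustration.
 */
u64 key = ((u64)entry->pasid << 48) | (addr >> AMDGPU_GPU_PAGE_SHIFT);

if (amdgpu_vm_add_fault(vm->fault_hash, key))
	return false;			/* full, or fault already pending */

if (!kfifo_put(&vm->faults, key)) {	/* FIFO full: undo the hash entry */
	amdgpu_vm_clear_fault(vm->fault_hash, key);
	return false;
}
return true;				/* first fault for this page */
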
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61..a3984d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
 }
 
 /**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	u32 ring_index = adev->irq.ih.rptr >> 2;
-	u16 pasid;
-
-	switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-	case 146:
-	case 147:
-		pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-		if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-			return true;
-		break;
-	default:
-		/* Not a VM fault */
-		return true;
-	}
-
-	adev->irq.ih.rptr += 16;
-	return false;
-}
-
-/**
  * iceland_ih_decode_iv - decode an interrupt vector
  *
  * @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs iceland_ih_funcs = {
 	.get_wptr = iceland_ih_get_wptr,
-	.prescreen_iv = iceland_ih_prescreen_iv,
 	.decode_iv = iceland_ih_decode_iv,
 	.set_rptr = iceland_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f..0c9a2c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
 	    pi->caps_db_ramping ||
 	    pi->caps_td_ramping ||
 	    pi->caps_tcp_ramping) {
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		if (enable) {
 			ret = kv_program_pt_config_registers(adev, didt_config_kv);
 			if (ret) {
-				adev->gfx.rlc.funcs->exit_safe_mode(adev);
+				amdgpu_gfx_rlc_exit_safe_mode(adev);
 				return ret;
 			}
 		}
 
 		kv_do_enable_didt(adev, enable);
 
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
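
The kv_dpm change swaps the open-coded adev->gfx.rlc.funcs->enter/exit_safe_mode() calls for the amdgpu_gfx_rlc_enter/exit_safe_mode() helpers, part of the wider RLC code sharing in this series. A plausible shape for such a wrapper, as a sketch rather than the verbatim helper (the in_safe_mode state flag is an assumption):

/* Sketch: centralizing the indirection gives one place to track
 * safe-mode state instead of every caller dereferencing the funcs
 * table directly.
 */
static inline void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)		/* assumed state flag */
		return;
	adev->gfx.rlc.funcs->enter_safe_mode(adev);
	adev->gfx.rlc.in_safe_mode = true;
}
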
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a0db67a..d0d966d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 	return base;
 }
 
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
 {
-	uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* stride, in registers, from mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-		     lower_32_bits(value));
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+			offset * vmid, lower_32_bits(page_table_base));
 
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-		     upper_32_bits(value));
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+			offset * vmid, upper_32_bits(page_table_base));
 }
 
 static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	mmhub_v1_0_init_gart_pt_regs(adev);
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 		     (u32)(adev->gmc.gart_start >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bef3d0c..0de0fdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
 void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                 bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d..6a0fcd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
 #include "gmc/gmc_8_2_sh_mask.h"
 #include "oss/oss_3_0_d.h"
 #include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 #include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83..0de00fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_ENABLE_INT      = 0x00050000,   /* enable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
+    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x000A0000,   /* send interrupt to PSP to update the VF write pointer */
+    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
 };
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
     GFX_CMD_ID_LOAD_IP_FW   = 0x00000006,   /* load HW IP FW */
     GFX_CMD_ID_DESTROY_TMR  = 0x00000007,   /* destroy TMR region */
     GFX_CMD_ID_SAVE_RESTORE = 0x00000008,   /* save/restore HW IP FW */
-
+    GFX_CMD_ID_SETUP_VMR    = 0x00000009,   /* setup VMR region */
+    GFX_CMD_ID_DESTROY_VMR  = 0x0000000A,   /* destroy VMR region */
 };
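
All of the GFX_CTRL_CMD_ID_* values above keep the command identifier in the upper half of the message word with the low 16 bits clear, which is why issuing a control command is a single register write of the enum value (a later psp_v3_1 hunk in this series replaces a bare 0x70000 with GFX_CTRL_CMD_ID_MODE1_RST for exactly this reason). A small illustration, as a fragment to be read inside any function with adev in scope:

/* Illustration: a PSP control command is one write of the enum value
 * into the chosen C2PMSG mailbox register; the ID sits above bit 15.
 */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_MODE1_RST);
BUILD_BUG_ON(GFX_CTRL_CMD_ID_MODE1_RST & 0xFFFF);  /* low half unused */
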
 
 
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c220..d78b430 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
 			       enum psp_ring_type ring_type)
 {
 	int ret = 0;
-	struct psp_ring *ring;
 	unsigned int psp_ring_reg = 0;
 	struct amdgpu_device *adev = psp->adev;
 
-	ring = &psp->km_ring;
-
 	/* Write the ring destroy command to C2PMSG_64 */
 	psp_ring_reg = 3 << 16;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 3f3fac2..6c9a1b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
 #include "nbio/nbio_7_4_offset.h"
 
 MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS		0x3010024
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	const char *chip_name;
 	char fw_name[30];
 	int err = 0;
-	const struct psp_firmware_header_v1_0 *hdr;
+	const struct psp_firmware_header_v1_0 *sos_hdr;
+	const struct ta_firmware_header_v1_0 *ta_hdr;
 
 	DRM_DEBUG("\n");
 
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	if (err)
 		goto out;
 
-	hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
-	adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
-	adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
-	adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
-	adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
-					le32_to_cpu(hdr->sos_size_bytes);
-	adev->psp.sys_start_addr = (uint8_t *)hdr +
-				le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+	adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+	adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+	adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+	adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+					le32_to_cpu(sos_hdr->sos_size_bytes);
+	adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
 	adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(hdr->sos_offset_bytes);
+				le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+
+	err = amdgpu_ucode_validate(adev->psp.ta_fw);
+	if (err)
+		goto out;
+
+	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+	adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+	adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+	adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
 	return 0;
 out:
 	if (err) {
@@ -153,8 +171,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 	 * are already been loaded.
 	 */
 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-	if (sol_reg)
+	if (sol_reg) {
+		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+		printk(KERN_INFO "sos fw version = 0x%x.\n", psp->sos_fw_version);
 		return 0;
+	}
 
 	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -167,7 +188,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 	/* Copy PSP System Driver binary to memory */
 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-	/* Provide the sys driver to bootrom */
+	/* Provide the sys driver to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +229,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	/* Copy Secure OS binary to PSP memory */
 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-	/* Provide the PSP secure OS to bootrom */
+	/* Provide the PSP secure OS to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 2 << 16;
@@ -278,26 +299,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
 	struct psp_ring *ring = &psp->km_ring;
 	struct amdgpu_device *adev = psp->adev;
 
-	/* Write low address of the ring to C2PMSG_69 */
-	psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
-	/* Write high address of the ring to C2PMSG_70 */
-	psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
-	/* Write size of ring to C2PMSG_71 */
-	psp_ring_reg = ring->ring_size;
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
-	/* Write the ring initialization command to C2PMSG_64 */
-	psp_ring_reg = ring_type;
-	psp_ring_reg = psp_ring_reg << 16;
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+	if (psp_support_vmr_ring(psp)) {
+		/* Write low address of the ring to C2PMSG_102 */
+		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+		/* Write high address of the ring to C2PMSG_103 */
+		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
 
-	/* there might be handshake issue with hardware which needs delay */
-	mdelay(20);
+		/* Write the ring initialization command to C2PMSG_101 */
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+					     GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
 
-	/* Wait for response flag (bit 31) in C2PMSG_64 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-			   0x80000000, 0x8000FFFF, false);
+		/* there might be a handshake issue with hardware which needs a delay */
+		mdelay(20);
+
+		/* Wait for response flag (bit 31) in C2PMSG_101 */
+		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+				   0x80000000, 0x8000FFFF, false);
+
+	} else {
+		/* Write low address of the ring to C2PMSG_69 */
+		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+		/* Write high address of the ring to C2PMSG_70 */
+		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+		/* Write size of ring to C2PMSG_71 */
+		psp_ring_reg = ring->ring_size;
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+		/* Write the ring initialization command to C2PMSG_64 */
+		psp_ring_reg = ring_type;
+		psp_ring_reg = psp_ring_reg << 16;
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+		/* there might be a handshake issue with hardware which needs a delay */
+		mdelay(20);
+
+		/* Wait for response flag (bit 31) in C2PMSG_64 */
+		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+				   0x80000000, 0x8000FFFF, false);
+	}
 
 	return ret;
 }
@@ -308,15 +350,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
 	int ret = 0;
 	struct amdgpu_device *adev = psp->adev;
 
-	/* Write the ring destroy command to C2PMSG_64 */
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+	/* Write the ring destroy command */
+	if (psp_support_vmr_ring(psp))
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+				     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+	else
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+				     GFX_CTRL_CMD_ID_DESTROY_RINGS);
 
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
 
-	/* Wait for response flag (bit 31) in C2PMSG_64 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-			   0x80000000, 0x80000000, false);
+	/* Wait for response flag (bit 31) */
+	if (psp_support_vmr_ring(psp))
+		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+				   0x80000000, 0x80000000, false);
+	else
+		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+				   0x80000000, 0x80000000, false);
 
 	return ret;
 }
@@ -355,7 +406,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
 
 	/* KM (GPCOM) prepare write pointer */
-	psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+	if (psp_support_vmr_ring(psp))
+		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+	else
+		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
 	/* Update KM RB frame pointer to new frame */
 	/* write_frame ptr increments by size of rb_frame in bytes */
@@ -384,7 +438,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
 
 	/* Update the write Pointer in DWORDs */
 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+	if (psp_support_vmr_ring(psp)) {
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+	} else {
+		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+	}
 
 	return 0;
 }
@@ -529,7 +587,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
 	/*send the mode 1 reset command*/
 	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
 
-	mdelay(1000);
+	msleep(500);
 
 	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
 
@@ -552,24 +610,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
 static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
 	int number_devices, struct psp_xgmi_topology_info *topology)
 {
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+	int i;
+	int ret;
+
+	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+		return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	/* Fill in the shared memory with topology information as input */
+	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+	topology_info_input->num_nodes = number_devices;
+
+	for (i = 0; i < topology_info_input->num_nodes; i++) {
+		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+	}
+
+	/* Invoke xgmi ta to get the topology information */
+	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+	if (ret)
+		return ret;
+
+	/* Read the output topology information from the shared memory */
+	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+	for (i = 0; i < topology->num_nodes; i++) {
+		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+	}
+
 	return 0;
 }
 
 static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
 	int number_devices, struct psp_xgmi_topology_info *topology)
 {
-	return 0;
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+	int i;
+
+	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+		return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+	topology_info_input->num_nodes = number_devices;
+
+	for (i = 0; i < topology_info_input->num_nodes; i++) {
+		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+	}
+
+	/* Invoke xgmi ta to set topology information */
+	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
 }
 
 static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
 {
-	u64 hive_id = 0;
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
 
-	/* Remove me when we can get correct hive_id through PSP */
-	if (psp->adev->gmc.xgmi.num_physical_nodes)
-		hive_id = 0x123456789abcdef;
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
-	return hive_id;
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+	/* Invoke xgmi ta to get hive id */
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+	if (ret)
+		return 0;
+	else
+		return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+}
+
+static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+{
+	struct ta_xgmi_shared_memory *xgmi_cmd;
+	int ret;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+	/* Invoke xgmi ta to get the node id */
+	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+	if (ret)
+		return 0;
+	else
+		return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
 }
 
 static const struct psp_funcs psp_v11_0_funcs = {
@@ -587,6 +731,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
 	.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
 	.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+	.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
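
All four XGMI TA calls above follow one shared-memory protocol: zero the request/response buffer, set cmd_id, fill the input struct, call psp_xgmi_invoke(), then read the output union. A hypothetical wrapper condensing that pattern (xgmi_ta_query is illustrative, not kernel API):

/* Sketch of the common call shape; on success the caller reads the
 * matching member of buf->xgmi_out_message.
 */
static int xgmi_ta_query(struct psp_context *psp, uint32_t cmd_id)
{
	struct ta_xgmi_shared_memory *buf =
		psp->xgmi_context.xgmi_shared_buf;

	memset(buf, 0, sizeof(*buf));
	buf->cmd_id = cmd_id;		/* e.g. TA_COMMAND_XGMI__GET_HIVE_ID */

	return psp_xgmi_invoke(psp, cmd_id);
}
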
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e1ebf77..7357fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	/* Copy PSP System Driver binary to memory */
 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
-	/* Provide the sys driver to bootrom */
+	/* Provide the sys driver to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 1 << 16;
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	/* Copy Secure OS binary to PSP memory */
 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
-	/* Provide the PSP secure OS to bootrom */
+	/* Provide the PSP secure OS to bootloader */
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 2 << 16;
@@ -356,12 +356,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
 			      enum psp_ring_type ring_type)
 {
 	int ret = 0;
-	struct psp_ring *ring;
 	unsigned int psp_ring_reg = 0;
 	struct amdgpu_device *adev = psp->adev;
 
-	ring = &psp->km_ring;
-
 	/* Write the ring destroy command to C2PMSG_64 */
 	psp_ring_reg = 3 << 16;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,9 +590,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
 	}
 
 	/*send the mode 1 reset command*/
-	WREG32(offset, 0x70000);
+	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
 
-	mdelay(1000);
+	msleep(500);
 
 	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2d4770e..9f3cb2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   unsigned vmid, bool ctx_switch)
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on a 8 DW boundary */
 	sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	sdma_v2_4_enable(adev, true);
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id, queue_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	queue_id = (entry->ring_id & 0xc) >> 2;
+
+	if (instance_id <= 1 && queue_id == 0)
+		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
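
The illegal-instruction handler above decodes the IV's ring_id into an SDMA instance and queue, and only routes faults on queue 0 of a real instance to the scheduler. A worked example of the decode:

/* Worked example: ring_id = 0x6 (0b0110)
 *   instance_id = 0x6 & 0x3        = 2  -> not instance 0/1, ignored
 *   queue_id    = (0x6 & 0xc) >> 2 = 1  -> not queue 0, ignored
 * whereas ring_id = 0x1 gives instance 1, queue 0 -> drm_sched_fault().
 */
u8 instance_id = entry->ring_id & 0x3;
u8 queue_id = (entry->ring_id & 0xc) >> 2;
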
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6fb3eda..1bccc5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+				   struct amdgpu_job *job,
 				   struct amdgpu_ib *ib,
-				   unsigned vmid, bool ctx_switch)
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on a 8 DW boundary */
 	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	/* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1163,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
 		if (!amdgpu_sriov_vf(adev)) {
 			ring->use_doorbell = true;
 			ring->doorbell_index = (i == 0) ?
-				AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+				adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
 		} else {
 			ring->use_pollmem = true;
 		}
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	u8 instance_id, queue_id;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	queue_id = (entry->ring_id & 0xc) >> 2;
+
+	if (instance_id <= 1 && queue_id == 0)
+		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
 	return 0;
 }
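
The resume paths now call amdgpu_ring_test_helper() instead of open-coding the test-then-mark-ready sequence, matching the new ring->sched.ready flag used throughout this series. A plausible sketch of such a helper (not the verbatim implementation):

/* Sketch: run the ring test once and let the result drive the
 * scheduler's ready flag, so callers stop duplicating the dance.
 */
static int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	if (r)
		DRM_DEV_ERROR(ring->adev->dev,
			      "ring %s test failed (%d)\n", ring->name, r);
	ring->sched.ready = !r;
	return r;
}
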
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7a8c917..4b6d3e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
 #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
 
+#define WREG32_SDMA(instance, offset, value) \
+	WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+	RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 	} else {
-		u32 lowbit, highbit;
-
-		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
-		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
-		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
-				ring->me, highbit, lowbit);
-		wptr = highbit;
+		wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
 		wptr = wptr << 32;
-		wptr |= lowbit;
+		wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+				ring->me, wptr);
 	}
 
 	return wptr >> 2;
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
 				lower_32_bits(ring->wptr << 2),
 				ring->me,
 				upper_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+			    lower_32_bits(ring->wptr << 2));
+		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+			    upper_32_bits(ring->wptr << 2));
+	}
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u64 wptr;
+
+	if (ring->use_doorbell) {
+		/* XXX check if swapping is necessary on BE */
+		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+	} else {
+		wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+		wptr = wptr << 32;
+		wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+	}
+
+	return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+		/* XXX check if swapping is necessary on BE */
+		WRITE_ONCE(*wb, (ring->wptr << 2));
+		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+	} else {
+		uint64_t wptr = ring->wptr << 2;
+
+		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+			    lower_32_bits(wptr));
+		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+			    upper_32_bits(wptr));
 	}
 }
 
 static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VEGA10).
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-					struct amdgpu_ib *ib,
-					unsigned vmid, bool ctx_switch)
+				   struct amdgpu_job *job,
+				   struct amdgpu_ib *ib,
+				   bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* IB packet must end on a 8 DW boundary */
 	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 	}
 
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -593,6 +649,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+	u32 rb_cntl, ib_cntl;
+	int i;
+
+	if ((adev->mman.buffer_funcs_ring == sdma0) ||
+	    (adev->mman.buffer_funcs_ring == sdma1))
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+					RB_ENABLE, 0);
+		WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+		ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+					IB_ENABLE, 0);
+		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+	}
+
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
+}
+
+/**
  * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+		f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
 		if (enable && amdgpu_sdma_phase_quantum) {
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
-			       phase_quantum);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
-			       phase_quantum);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
-			       phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+			WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
 		}
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
 	}
 
 }
@@ -662,156 +748,215 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	if (enable == false) {
 		sdma_v4_0_gfx_stop(adev);
 		sdma_v4_0_rlc_stop(adev);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_stop(adev);
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+		f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+		WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
 	}
 }
 
 /**
+ * sdma_v4_0_rb_cntl - derive the common RB_CNTL value for a ring
+ *
+ * @ring: ring whose buffer size determines the RB_SIZE field
+ * @rb_cntl: current register value to update
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+	/* Set ring buffer size in dwords */
+	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+				RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+	return rb_cntl;
+}
+
+/**
  * sdma_v4_0_gfx_resume - setup and start the async dma engines
  *
  * @adev: amdgpu_device pointer
+ * @i: instance to resume
  *
  * Set up the gfx DMA ring buffers and enable them (VEGA10).
  * Returns 0 for success, error for failure.
  */
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 {
-	struct amdgpu_ring *ring;
+	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
 	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
-	u32 rb_bufsz;
 	u32 wb_offset;
 	u32 doorbell;
 	u32 doorbell_offset;
-	u32 temp;
 	u64 wptr_gpu_addr;
-	int i, r;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		ring = &adev->sdma.instance[i].ring;
-		wb_offset = (ring->rptr_offs * 4);
+	wb_offset = (ring->rptr_offs * 4);
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 
-		/* Set ring buffer size in dwords */
-		rb_bufsz = order_base_2(ring->ring_size / 4);
-		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
-					RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
 
-		/* Initialize the ring buffer's read and write pointers */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+	/* set the wb address whether it's enabled or not */
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 
-		/* set the wb address whether it's enabled or not */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
-		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
-		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+				RPTR_WRITEBACK_ENABLE, 1);
 
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+	ring->wptr = 0;
 
-		ring->wptr = 0;
+	/* before programming wptr to a lower value, set minor_ptr_update first */
+	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
 
-		/* before programing wptr to a less value, need set minor_ptr_update first */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+	doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+	doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
 
-		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
-		}
-
-		doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
-		doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
-		if (ring->use_doorbell) {
-			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
-			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+	doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+				 ring->use_doorbell);
+	doorbell_offset = REG_SET_FIELD(doorbell_offset,
+					SDMA0_GFX_DOORBELL_OFFSET,
 					OFFSET, ring->doorbell_index);
-		} else {
-			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
-		}
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-		adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
-						      ring->doorbell_index);
+	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+	adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+					      ring->doorbell_index);
 
-		if (amdgpu_sriov_vf(adev))
-			sdma_v4_0_ring_set_wptr(ring);
+	sdma_v4_0_ring_set_wptr(ring);
 
-		/* set minor_ptr_update to 0 after wptr programed */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+	/* set minor_ptr_update to 0 after wptr is programmed */
+	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
 
-		/* set utc l1 enable flag always to 1 */
-		temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+	/* setup the wptr shadow polling */
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+		    lower_32_bits(wptr_gpu_addr));
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+		    upper_32_bits(wptr_gpu_addr));
+	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+				       SDMA0_GFX_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
 
-		if (!amdgpu_sriov_vf(adev)) {
-			/* unhalt engine */
-			temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
-		}
+	/* enable DMA RB */
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 
-		/* setup the wptr shadow polling */
-		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
-		       lower_32_bits(wptr_gpu_addr));
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
-		       upper_32_bits(wptr_gpu_addr));
-		wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
-		if (amdgpu_sriov_vf(adev))
-			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
-		else
-			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
-
-		/* enable DMA RB */
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
-		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+	ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 #ifdef __BIG_ENDIAN
-		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 #endif
-		/* enable DMA IBs */
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+	/* enable DMA IBs */
+	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 
-		ring->ready = true;
+	ring->sched.ready = true;
+}
 
-		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
-			sdma_v4_0_ctx_switch_enable(adev, true);
-			sdma_v4_0_enable(adev, true);
-		}
+/**
+ * sdma_v4_0_page_resume - setup and start the page queue
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them (VEGA10).
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+	u32 wb_offset;
+	u32 doorbell;
+	u32 doorbell_offset;
+	u64 wptr_gpu_addr;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
-			return r;
-		}
+	wb_offset = (ring->rptr_offs * 4);
 
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
+	rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
 
-	}
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
 
-	return 0;
+	/* set the wb address whether it's enabled or not */
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+				RPTR_WRITEBACK_ENABLE, 1);
+
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
+
+	ring->wptr = 0;
+
+	/* before programming wptr to a lower value, set minor_ptr_update first */
+	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
+
+	doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+	doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
+
+	doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+				 ring->use_doorbell);
+	doorbell_offset = REG_SET_FIELD(doorbell_offset,
+					SDMA0_PAGE_DOORBELL_OFFSET,
+					OFFSET, ring->doorbell_index);
+	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+	/* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume() */
+	sdma_v4_0_page_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+	/* setup the wptr shadow polling */
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+		    lower_32_bits(wptr_gpu_addr));
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+		    upper_32_bits(wptr_gpu_addr));
+	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+				       SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+	/* enable DMA RB */
+	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+	ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+	/* enable DMA IBs */
+	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+
+	ring->sched.ready = true;
 }
 
 static void
@@ -922,12 +1067,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
 			(adev->sdma.instance[i].fw->data +
 				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
 
 		for (j = 0; j < fw_size; j++)
-			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+			WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+				    le32_to_cpup(fw_data++));
 
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+			    adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
@@ -943,33 +1090,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v4_0_start(struct amdgpu_device *adev)
 {
-	int r = 0;
+	struct amdgpu_ring *ring;
+	int i, r;
 
 	if (amdgpu_sriov_vf(adev)) {
 		sdma_v4_0_ctx_switch_enable(adev, false);
 		sdma_v4_0_enable(adev, false);
+	} else {
 
-		/* set RB registers */
-		r = sdma_v4_0_gfx_resume(adev);
-		return r;
+		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+			r = sdma_v4_0_load_microcode(adev);
+			if (r)
+				return r;
+		}
+
+		/* unhalt the MEs */
+		sdma_v4_0_enable(adev, true);
+		/* enable sdma ring preemption */
+		sdma_v4_0_ctx_switch_enable(adev, true);
 	}
 
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-		r = sdma_v4_0_load_microcode(adev);
+	/* start the gfx and page rings */
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		uint32_t temp;
+
+		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+		sdma_v4_0_gfx_resume(adev, i);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_resume(adev, i);
+
+		/* always set the UTC L1 enable flag to 1 */
+		temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+		WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+		if (!amdgpu_sriov_vf(adev)) {
+			/* unhalt engine */
+			temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+			WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+		}
+	}
+
+	if (amdgpu_sriov_vf(adev)) {
+		sdma_v4_0_ctx_switch_enable(adev, true);
+		sdma_v4_0_enable(adev, true);
+	} else {
+		r = sdma_v4_0_rlc_resume(adev);
 		if (r)
 			return r;
 	}
 
-	/* unhalt the MEs */
-	sdma_v4_0_enable(adev, true);
-	/* enable sdma ring preemption */
-	sdma_v4_0_ctx_switch_enable(adev, true);
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 
-	/* start the gfx rings and rlc compute queues */
-	r = sdma_v4_0_gfx_resume(adev);
-	if (r)
-		return r;
-	r = sdma_v4_0_rlc_resume(adev);
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
+			return r;
+
+		if (adev->sdma.has_page_queue) {
+			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+			r = amdgpu_ring_test_helper(page);
+			if (r)
+				return r;
+
+			if (adev->mman.buffer_funcs_ring == page)
+				amdgpu_ttm_set_buffer_funcs_status(adev, true);
+		}
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
+	}
 
 	return r;
 }
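
The ring-test pattern that used to be open coded here (set ready, test, clear on failure) is now centralized in amdgpu_ring_test_helper(). A sketch of the semantics these call sites rely on, inferred from the pattern being replaced rather than quoted from the helper itself; the real helper is also the natural home for the error logging that the deleted DRM_ERROR calls used to do:

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r = amdgpu_ring_test_ring(ring);

		/* mark the ring schedulable only when the test passed */
		ring->sched.ready = !r;
		return r;
	}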
@@ -993,21 +1185,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 5);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1211,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -1055,20 +1238,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1266,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
+
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -1206,7 +1381,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
 	u32 pad_count;
 	int i;
 
@@ -1272,15 +1447,46 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
 }
 
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+	uint fw_version = adev->sdma.instance[0].fw_version;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+		return fw_version >= 430;
+	case CHIP_VEGA12:
+		/*return fw_version >= 31;*/
+		return false;
+	case CHIP_VEGA20:
+		/*return fw_version >= 115;*/
+		return false;
+	default:
+		return false;
+	}
+}
+
 static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
 
 	if (adev->asic_type == CHIP_RAVEN)
 		adev->sdma.num_instances = 1;
 	else
 		adev->sdma.num_instances = 2;
 
+	r = sdma_v4_0_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load sdma firmware!\n");
+		return r;
+	}
+
+	/* TODO: Page queue breaks driver reload under SRIOV */
+	if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf(adev))
+		adev->sdma.has_page_queue = false;
+	else if (sdma_v4_0_fw_support_paging_queue(adev))
+		adev->sdma.has_page_queue = true;
+
 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
 	sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1289,7 +1495,6 @@ static int sdma_v4_0_early_init(void *handle)
 	return 0;
 }
 
-
 static int sdma_v4_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
@@ -1308,12 +1513,6 @@ static int sdma_v4_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = sdma_v4_0_init_microcode(adev);
-	if (r) {
-		DRM_ERROR("Failed to load sdma firmware!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
@@ -1322,15 +1521,10 @@ static int sdma_v4_0_sw_init(void *handle)
 		DRM_INFO("use_doorbell being set to: [%s]\n",
 				ring->use_doorbell?"true":"false");
 
-		if (adev->asic_type == CHIP_VEGA10)
-			ring->doorbell_index = (i == 0) ?
-				(AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
-				: (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-		else
-			ring->doorbell_index = (i == 0) ?
-				(AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
-				: (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+		/* doorbell size is 2 dwords, get DWORD offset */
+		ring->doorbell_index = (i == 0) ?
+			(adev->doorbell_index.sdma_engine0 << 1)
+			: (adev->doorbell_index.sdma_engine1 << 1);
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
@@ -1340,6 +1534,29 @@ static int sdma_v4_0_sw_init(void *handle)
 				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
+
+		if (adev->sdma.has_page_queue) {
+			ring = &adev->sdma.instance[i].page;
+			ring->ring_obj = NULL;
+			ring->use_doorbell = true;
+
+			/* the paging queue uses the same doorbell index/routing
+			 * as the gfx queue, plus a 0x400 dword (4 KB) offset
+			 * that lands it on the second doorbell page
+			 */
+			ring->doorbell_index = (i == 0) ?
+				(adev->doorbell_index.sdma_engine0 << 1)
+				: (adev->doorbell_index.sdma_engine1 << 1);
+			ring->doorbell_index += 0x400;
+
+			sprintf(ring->name, "page%d", i);
+			r = amdgpu_ring_init(adev, ring, 1024,
+					     &adev->sdma.trap_irq,
+					     (i == 0) ?
+					     AMDGPU_SDMA_IRQ_TRAP0 :
+					     AMDGPU_SDMA_IRQ_TRAP1);
+			if (r)
+				return r;
+		}
 	}
 
 	return r;
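
To make the paging-queue doorbell layout concrete: each doorbell entry is two dwords (hence the << 1), and the page ring sits 0x400 dwords (one 4 KB page) above the gfx ring. With a hypothetical sdma_engine0 index of 0xE0, illustrative values only:

	gfx_db  = 0xE0 << 1;            /* 0x1C0, first doorbell page  */
	page_db = (0xE0 << 1) + 0x400;  /* 0x5C0, second doorbell page */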
@@ -1350,8 +1567,11 @@ static int sdma_v4_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+		if (adev->sdma.has_page_queue)
+			amdgpu_ring_fini(&adev->sdma.instance[i].page);
+	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1634,7 @@ static bool sdma_v4_0_is_idle(void *handle)
 	u32 i;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+		u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
 
 		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
 			return false;
@@ -1430,8 +1650,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
-		sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+		sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+		sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
 
 		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
 			return 0;
@@ -1452,16 +1672,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
 					unsigned type,
 					enum amdgpu_interrupt_state state)
 {
+	unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
 	u32 sdma_cntl;
 
-	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
-		sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-		sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
-	sdma_cntl = RREG32(reg_offset);
+	sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
 		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-	WREG32(reg_offset, sdma_cntl);
+	WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
 
 	return 0;
 }
@@ -1470,39 +1687,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
 {
+	uint32_t instance;
+
 	DRM_DEBUG("IH: SDMA trap\n");
 	switch (entry->client_id) {
 	case SOC15_IH_CLIENTID_SDMA0:
-		switch (entry->ring_id) {
-		case 0:
-			amdgpu_fence_process(&adev->sdma.instance[0].ring);
-			break;
-		case 1:
-			/* XXX compute */
-			break;
-		case 2:
-			/* XXX compute */
-			break;
-		case 3:
-			/* XXX page queue*/
-			break;
-		}
+		instance = 0;
 		break;
 	case SOC15_IH_CLIENTID_SDMA1:
-		switch (entry->ring_id) {
-		case 0:
-			amdgpu_fence_process(&adev->sdma.instance[1].ring);
-			break;
-		case 1:
-			/* XXX compute */
-			break;
-		case 2:
-			/* XXX compute */
-			break;
-		case 3:
-			/* XXX page queue*/
-			break;
-		}
+		instance = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (entry->ring_id) {
+	case 0:
+		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+		break;
+	case 1:
+		/* XXX compute */
+		break;
+	case 2:
+		/* XXX compute */
+		break;
+	case 3:
+		amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	}
 	return 0;
@@ -1512,12 +1722,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
 					      struct amdgpu_iv_entry *entry)
 {
+	int instance;
+
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_SDMA0:
+		instance = 0;
+		break;
+	case SOC15_IH_CLIENTID_SDMA1:
+		instance = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (entry->ring_id) {
+	case 0:
+		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+		break;
+	}
 	return 0;
 }
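
The illegal-instruction handler no longer schedules a whole-device reset; it reports the fault on the offending ring's scheduler and lets the shared timeout/recovery machinery decide what to do. drm_sched_fault() lives in the common scheduler code, not in this file; a sketch of its likely body, with the details to be treated as an assumption:

	void drm_sched_fault(struct drm_gpu_scheduler *sched)
	{
		/* kick the scheduler's timeout handler immediately */
		mod_delayed_work(system_wq, &sched->work_tdr, 0);
	}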
 
-
 static void sdma_v4_0_update_medium_grain_clock_gating(
 		struct amdgpu_device *adev,
 		bool enable)
@@ -1730,6 +1957,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+	.support_64bit_ptrs = true,
+	.vmhub = AMDGPU_MMHUB,
+	.get_rptr = sdma_v4_0_ring_get_rptr,
+	.get_wptr = sdma_v4_0_page_ring_get_wptr,
+	.set_wptr = sdma_v4_0_page_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v4_0_ring_emit_hdp_flush */
+		3 + /* hdp invalidate */
+		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+		/* sdma_v4_0_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+	.emit_ib = sdma_v4_0_ring_emit_ib,
+	.emit_fence = sdma_v4_0_ring_emit_fence,
+	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+	.test_ring = sdma_v4_0_ring_test_ring,
+	.test_ib = sdma_v4_0_ring_test_ib,
+	.insert_nop = sdma_v4_0_ring_insert_nop,
+	.pad_ib = sdma_v4_0_ring_pad_ib,
+	.emit_wreg = sdma_v4_0_ring_emit_wreg,
+	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1737,6 +1996,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
+		if (adev->sdma.has_page_queue) {
+			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+			adev->sdma.instance[i].page.me = i;
+		}
 	}
 }
 
@@ -1818,7 +2081,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+	if (adev->sdma.has_page_queue)
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+	else
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2102,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
 		adev->vm_manager.vm_pte_rqs[i] =
 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index adbaea6..b6e4731 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 }
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
 				struct amdgpu_ib *ib,
-				unsigned vmid, bool ctx_switch)
+				bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
 	 * Pad as necessary with NOPs.
 	 */
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-		ring->ready = false;
+		ring->sched.ready = false;
 	}
 }
 
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
 		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
 	u64 gpu_addr;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
 	r = amdgpu_ring_alloc(ring, 4);
-	if (r) {
-		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-		amdgpu_device_wb_free(adev, index);
-		return r;
-	}
+	if (r)
+		goto error_free_wb;
 
 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_device_wb_free(adev, index);
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
 
+error_free_wb:
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
 
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = amdgpu_device_wb_get(adev, &index);
-	if (r) {
-		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+	if (r)
 		return r;
-	}
 
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	if (r)
 		goto err0;
-	}
 
 	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
 	ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
-	if (tmp == 0xDEADBEEF) {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+	else
 		r = -EINVAL;
-	}
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
-					      struct amdgpu_irq_src *source,
-					      struct amdgpu_iv_entry *entry)
-{
-	DRM_ERROR("Illegal instruction in SDMA command stream\n");
-	schedule_work(&adev->reset_work);
-	return 0;
-}
-
 static int si_dma_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
 	.process = si_dma_process_trap_irq,
 };
 
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
-	.process = si_dma_process_illegal_inst_irq,
-};
-
 static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
 	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
-	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f..2938fb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	/* Process all interrupts */
-	return true;
-}
-
 static void si_ih_decode_iv(struct amdgpu_device *adev,
 			     struct amdgpu_iv_entry *entry)
 {
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs si_ih_funcs = {
 	.get_wptr = si_ih_get_wptr,
-	.prescreen_iv = si_ih_prescreen_iv,
 	.decode_iv = si_ih_decode_iv,
 	.set_rptr = si_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb..8849b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
+	if (adev->asic_type == CHIP_VEGA20)
+		adev->gmc.xgmi.supported = true;
+
 	if (adev->flags & AMD_IS_APU)
 		adev->nbio_funcs = &nbio_v7_0_funcs;
 	else if (adev->asic_type == CHIP_VEGA20)
@@ -613,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.flush_hdp = &soc15_flush_hdp,
 	.invalidate_hdp = &soc15_invalidate_hdp,
 	.need_full_reset = &soc15_need_full_reset,
+	.init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+	.read_disabled_bios = &soc15_read_disabled_bios,
+	.read_bios_from_rom = &soc15_read_bios_from_rom,
+	.read_register = &soc15_read_register,
+	.reset = &soc15_asic_reset,
+	.set_vga_state = &soc15_vga_set_state,
+	.get_xclk = &soc15_get_xclk,
+	.set_uvd_clocks = &soc15_set_uvd_clocks,
+	.set_vce_clocks = &soc15_set_vce_clocks,
+	.get_config_memsize = &soc15_get_config_memsize,
+	.flush_hdp = &soc15_flush_hdp,
+	.invalidate_hdp = &soc15_invalidate_hdp,
+	.need_full_reset = &soc15_need_full_reset,
+	.init_doorbell_index = &vega20_doorbell_index_init,
 };
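
vega20_asic_funcs differs from soc15_asic_funcs only in the init_doorbell_index hook, so VEGA20 gets its own doorbell layout while the other SOC15 parts keep the VEGA10 one. The hook is presumably reached through a wrapper of the usual amdgpu_asic_* form (assumed, not visible in this excerpt):

	#define amdgpu_asic_init_doorbell_index(adev) \
		(adev)->asic_funcs->init_doorbell_index((adev))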
 
 static int soc15_common_early_init(void *handle)
@@ -632,11 +653,11 @@ static int soc15_common_early_init(void *handle)
 	adev->se_cac_rreg = &soc15_se_cac_rreg;
 	adev->se_cac_wreg = &soc15_se_cac_wreg;
 
-	adev->asic_funcs = &soc15_asic_funcs;
 
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+		adev->asic_funcs = &soc15_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +681,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = 0x1;
 		break;
 	case CHIP_VEGA12:
+		adev->asic_funcs = &soc15_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +704,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_VEGA20:
+		adev->asic_funcs = &vega20_asic_funcs;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,6 +727,7 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = adev->rev_id + 0x28;
 		break;
 	case CHIP_RAVEN:
+		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
 			adev->external_rev_id = adev->rev_id + 0x81;
 		else if (adev->pdev->device == 0x15d8)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad780..a66c8bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
 int vega10_reg_base_init(struct amdgpu_device *adev);
 int vega20_reg_base_init(struct amdgpu_device *adev);
 
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644
index 0000000..ac2c27b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_command_xgmi {
+	TA_COMMAND_XGMI__INITIALIZE			= 0x00,
+	TA_COMMAND_XGMI__GET_NODE_ID			= 0x01,
+	TA_COMMAND_XGMI__GET_HIVE_ID			= 0x02,
+	TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO		= 0x03,
+	TA_COMMAND_XGMI__SET_TOPOLOGY_INFO		= 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+	TA_XGMI__MAX_CONNECTED_NODES			= 64
+};
+
+enum ta_xgmi_status {
+	TA_XGMI_STATUS__SUCCESS				= 0x00,
+	TA_XGMI_STATUS__GENERIC_FAILURE			= 0x01,
+	TA_XGMI_STATUS__NULL_POINTER			= 0x02,
+	TA_XGMI_STATUS__INVALID_PARAMETER		= 0x03,
+	TA_XGMI_STATUS__NOT_INITIALIZED			= 0x04,
+	TA_XGMI_STATUS__INVALID_NODE_NUM		= 0x05,
+	TA_XGMI_STATUS__INVALID_NODE_ID			= 0x06,
+	TA_XGMI_STATUS__INVALID_TOPOLOGY		= 0x07,
+	TA_XGMI_STATUS__FAILED_ID_GEN			= 0x08,
+	TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT		= 0x09,
+	TA_XGMI_STATUS__SET_SHARING_ERROR		= 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED	= -1,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0		= 0,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1		= 1,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2		= 2,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3		= 3,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4		= 4,
+	TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5		= 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+	uint64_t				node_id;
+	uint8_t					num_hops;
+	uint8_t					is_sharing_enabled;
+	enum ta_xgmi_assigned_sdma_engine	sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+	uint32_t	status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+	uint64_t	node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+	uint64_t	hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+	uint32_t			num_nodes;
+	struct ta_xgmi_node_info	nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+	struct ta_xgmi_cmd_get_topology_info_input	get_topology_info;
+	struct ta_xgmi_cmd_set_topology_info_input	set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+	struct ta_xgmi_cmd_initialize_output		initialize;
+	struct ta_xgmi_cmd_get_node_id_output		get_node_id;
+	struct ta_xgmi_cmd_get_hive_id_output		get_hive_id;
+	struct ta_xgmi_cmd_get_topology_info_output	get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+	uint32_t			cmd_id;
+	uint32_t			resp_id;
+	enum ta_xgmi_status		xgmi_status;
+	uint32_t			reserved;
+	union ta_xgmi_cmd_input		xgmi_in_message;
+	union ta_xgmi_cmd_output	xgmi_out_message;
+};
+
+#endif /* _TA_XGMI_IF_H */
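
For orientation, a hedged sketch of how the PSP side might drive this interface for a GET_HIVE_ID request. Only the structures and enums come from this header; psp_xgmi_invoke() and the xgmi_shared_buf field are assumptions about the caller:

	struct ta_xgmi_shared_memory *cmd = psp->xgmi_context.xgmi_shared_buf;
	uint64_t hive_id;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* submit to the XGMI trusted app; the invoke helper name is assumed */
	ret = psp_xgmi_invoke(psp, cmd->cmd_id);
	if (ret || cmd->xgmi_status != TA_XGMI_STATUS__SUCCESS)
		return -EINVAL;

	/* the TA echoes the command: cmd->resp_id == RSP_ID(cmd->cmd_id) */
	hive_id = cmd->xgmi_out_message.get_hive_id.hive_id;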
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd0..15da06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
 }
 
 /**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	u32 ring_index = adev->irq.ih.rptr >> 2;
-	u16 pasid;
-
-	switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-	case 146:
-	case 147:
-		pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-		if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-			return true;
-		break;
-	default:
-		/* Not a VM fault */
-		return true;
-	}
-
-	adev->irq.ih.rptr += 16;
-	return false;
-}
-
-/**
  * tonga_ih_decode_iv - decode an interrupt vector
  *
  * @adev: amdgpu_device pointer
@@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle)
 		return r;
 
 	adev->irq.ih.use_doorbell = true;
-	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
 
 	r = amdgpu_irq_init(adev);
 
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs tonga_ih_funcs = {
 	.get_wptr = tonga_ih_get_wptr,
-	.prescreen_iv = tonga_ih_prescreen_iv,
 	.decode_iv = tonga_ih_decode_iv,
 	.set_rptr = tonga_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf..d69c8f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
 	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v4_2_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
 	amdgpu_ring_write(ring, ib->gpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5..ee8cd06 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
 	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v5_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v5_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b402..d4f4a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	int r;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
-	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
 }
+
 static int uvd_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
 		DRM_INFO("UVD ENC is disabled\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v6_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v6_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
 	amdgpu_ring_write(ring, vmid);
 
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39af..089645e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		return 0;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			 ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
-			  ring->me, ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	long r;
 
 	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+	if (r)
 		goto error;
-	}
 
 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+	if (r)
 		goto error;
-	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
-	} else {
-		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
 	return r;
@@ -447,10 +430,6 @@ static int uvd_v7_0_sw_init(void *handle)
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
@@ -472,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
 				 * sriov, so set unused location for other unused rings.
 				 */
 				if (i == 0)
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 				else
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 			}
 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
 			if (r)
@@ -482,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
 		}
 	}
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 	if (r)
 		return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 
 			r = amdgpu_ring_alloc(ring, 10);
 			if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 		if (adev->uvd.harvest_config & (1 << i))
 			continue;
-		adev->uvd.inst[i].ring.ready = false;
+		adev->uvd.inst[i].ring.sched.ready = false;
 	}
 
 	return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 
 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			 ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->me, ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
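
Note on the ->emit_ib() signature change repeated through these files: the callbacks now receive the amdgpu_job itself rather than a pre-extracted vmid, recovering the VMID via AMDGPU_JOB_GET_VMID(). Handing the whole job down gives the emit path access to per-job state without another round of signature churn. A plausible definition of the macro, assuming a NULL job denotes a kernel-internal submission that runs with VMID 0:

	#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
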
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828..bed78a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 	vce_v2_0_enable_mgcg(adev, true, false);
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
 
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd397..2668eff 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_sh_mask.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -474,15 +473,10 @@ static int vce_v3_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +832,12 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+				  struct amdgpu_job *job,
+				  struct amdgpu_ib *ib,
+				  bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c94718..9fb34b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
 			 * so set unused location for other unused rings.
 			 */
 			if (i == 0)
-				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
 			else
-				ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
 		}
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
 		if (r)
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
 	}
 
 	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
+		adev->vce.ring[i].sched.ready = false;
 
 	return 0;
 }
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
 }
 #endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+					struct amdgpu_ib *ib, bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 322e09b..4f83520 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -177,30 +177,22 @@ static int vcn_v1_0_hw_init(void *handle)
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 	int i, r;
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		ring = &adev->vcn.ring_enc[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		ring->sched.ready = true;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			goto done;
-		}
 	}
 
 	ring = &adev->vcn.ring_jpeg;
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 done:
 	if (!r)
@@ -225,7 +217,7 @@ static int vcn_v1_0_hw_fini(void *handle)
 	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -1367,10 +1359,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  * Write ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1525,8 +1519,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1726,10 +1724,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
  * Write ring commands to execute the indirect buffer.
  */
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+					struct amdgpu_job *job,
+					struct amdgpu_ib *ib,
+					bool ctx_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f..2c250b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
 }
 
 /**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-	u32 ring_index = adev->irq.ih.rptr >> 2;
-	u32 dw0, dw3, dw4, dw5;
-	u16 pasid;
-	u64 addr, key;
-	struct amdgpu_vm *vm;
-	int r;
-
-	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
-	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
-	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
-	/* Filter retry page faults, let only the first one pass. If
-	 * there are too many outstanding faults, ignore them until
-	 * some faults get cleared.
-	 */
-	switch (dw0 & 0xff) {
-	case SOC15_IH_CLIENTID_VMC:
-	case SOC15_IH_CLIENTID_UTCL2:
-		break;
-	default:
-		/* Not a VM fault */
-		return true;
-	}
-
-	pasid = dw3 & 0xffff;
-	/* No PASID, can't identify faulting process */
-	if (!pasid)
-		return true;
-
-	/* Not a retry fault, check fault credit */
-	if (!(dw5 & 0x80)) {
-		if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
-			goto ignore_iv;
-		return true;
-	}
-
-	/* Track retry faults in per-VM fault FIFO. */
-	spin_lock(&adev->vm_manager.pasid_lock);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
-	key = AMDGPU_VM_FAULT(pasid, addr);
-	if (!vm) {
-		/* VM not found, process it normally */
-		spin_unlock(&adev->vm_manager.pasid_lock);
-		return true;
-	} else {
-		r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
-		/* Hash table is full or the fault is already being processed,
-		 * ignore further page faults
-		 */
-		if (r != 0) {
-			spin_unlock(&adev->vm_manager.pasid_lock);
-			goto ignore_iv;
-		}
-	}
-	/* No locking required with single writer and single reader */
-	r = kfifo_put(&vm->faults, key);
-	if (!r) {
-		/* FIFO is full. Ignore it until there is space */
-		amdgpu_vm_clear_fault(vm->fault_hash, key);
-		spin_unlock(&adev->vm_manager.pasid_lock);
-		goto ignore_iv;
-	}
-
-	spin_unlock(&adev->vm_manager.pasid_lock);
-	/* It's the first fault for this address, process it normally */
-	return true;
-
-ignore_iv:
-	adev->irq.ih.rptr += 32;
-	return false;
-}
-
-/**
  * vega10_ih_decode_iv - decode an interrupt vector
  *
  * @adev: amdgpu_device pointer
@@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle)
 		return r;
 
 	adev->irq.ih.use_doorbell = true;
-	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
 
 	r = amdgpu_irq_init(adev);
 
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs vega10_ih_funcs = {
 	.get_wptr = vega10_ih_get_wptr,
-	.prescreen_iv = vega10_ih_prescreen_iv,
 	.decode_iv = vega10_ih_decode_iv,
 	.set_rptr = vega10_ih_set_rptr
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2b..422674b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+	adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+	adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+	adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+	adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+	adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+	adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+	adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+	adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+	adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+	/* In units of dwords; 64-bit doorbells span two dword slots, hence the shift */
+	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
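
The initializer above fills a per-ASIC doorbell map so consumers (see the UVD/VCE hunks earlier) read adev->doorbell_index.* instead of hardcoding AMDGPU_DOORBELL64_* values. A sketch of the structure's likely shape, inferred from the assignments here and in the vega20/vi variants below (the real definition belongs to the amdgpu headers):

	struct amdgpu_doorbell_index {
		uint32_t kiq;
		uint32_t mec_ring0;	/* ... through mec_ring7 ... */
		uint32_t userqueue_start;
		uint32_t userqueue_end;
		uint32_t gfx_ring0;
		uint32_t sdma_engine0;	/* ... more engines on vega20 ... */
		uint32_t ih;
		struct {
			uint32_t uvd_ring0_1;	/* ... through uvd_ring6_7 ... */
			uint32_t vce_ring0_1;	/* ... through vce_ring6_7 ... */
		} uvd_vce;
		uint32_t max_assignment;
	};
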
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4f..edce413 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+	adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+	adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+	adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+	adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+	adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+	adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+	adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+	adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+	adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+	adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+	adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+	adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+	adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+	adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+	adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+	adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+	adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+	adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d3..ff2906c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.flush_hdp = &vi_flush_hdp,
 	.invalidate_hdp = &vi_invalidate_hdp,
 	.need_full_reset = &vi_need_full_reset,
+	.init_doorbell_index = &legacy_doorbell_index_init,
 };
 
 #define CZ_REV_BRISTOL(rev)	 \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 
 	return 0;
 }
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+	adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+	adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
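
With vega10, vega20 and the pre-SOC15 parts each providing an init_doorbell_index hook (registered in the asic_funcs tables, as above), the core presumably invokes it once during early device init, before any IP block consumes a doorbell slot. A hedged sketch of such a call site; the enclosing function name here is illustrative:

	static void amdgpu_device_doorbell_init(struct amdgpu_device *adev)
	{
		/* dispatch to the vega10/vega20/legacy per-ASIC initializer */
		adev->asic_funcs->init_doorbell_index(adev);
	}
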
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe3..8de0772 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);
 
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5d2475d..177d1e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -23,6 +23,7 @@
 #include "kfd_priv.h"
 #include "kfd_events.h"
 #include "cik_int.h"
+#include "amdgpu_amdkfd.h"
 
 static bool cik_event_interrupt_isr(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry,
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
 		kfd_process_vm_fault(dev->dqm, pasid);
 
 		memset(&info, 0, sizeof(info));
-		dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+		amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
 		if (!info.page_addr && !info.status)
 			return;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 37ce6dd..8e2a166 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -68,6 +68,4 @@
 
 #define GRBM_GFX_INDEX					0x30800
 
-#define	ATC_VMID_PASID_MAPPING_VALID			(1U << 31)
-
 #endif
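
A pattern that repeats through the amdkfd hunks below: indirect calls through the dev->kfd2kgd function-pointer table are replaced by direct amdgpu_amdkfd_*() calls, which became practical once amdkfd was linked into the amdgpu module proper. A minimal sketch of the wrapper style, assuming (as these wrappers conventionally do) that struct kgd_dev is an opaque alias for the amdgpu device:

	void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
	{
		struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

		/* unpin and drop the kernel BO handed out by alloc_gtt_mem */
		amdgpu_bo_reserve(bo, true);
		amdgpu_bo_kunmap(bo);
		amdgpu_bo_unpin(bo);
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
	}
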
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 14d5b5f..3623538 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -33,10 +33,12 @@
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/dma-buf.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_dbgmgr.h"
+#include "amdgpu_amdkfd.h"
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
@@ -834,8 +836,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev)
 		/* Reading GPU clock counter from KGD */
-		args->gpu_clock_counter =
-			dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
 	else
 		/* Node without GPU resource */
 		args->gpu_clock_counter = 0;
@@ -1042,7 +1043,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 		}
 		mutex_unlock(&p->mutex);
 
-		err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
 						mem, &kern_addr, &size);
 		if (err) {
 			pr_err("Failed to map event page to kernel\n");
@@ -1240,7 +1241,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 	if (dev->device_info->needs_iommu_device)
 		return false;
 
-	dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
 	if (mem_info.local_mem_size_private == 0 &&
 			mem_info.local_mem_size_public > 0)
 		return true;
@@ -1273,6 +1274,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 		return -EINVAL;
 	}
 
+	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+		if (args->size != kfd_doorbell_process_slice(dev))
+			return -EINVAL;
+		offset = kfd_get_process_doorbells(dev, p);
+	}
+
 	mutex_lock(&p->mutex);
 
 	pdd = kfd_bind_process_to_device(dev, p);
@@ -1281,7 +1288,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 		goto err_unlock;
 	}
 
-	err = dev->kfd2kgd->alloc_memory_of_gpu(
+	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		dev->kgd, args->va_addr, args->size,
 		pdd->vm, (struct kgd_mem **) &mem, &offset,
 		flags);
@@ -1303,7 +1310,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	return 0;
 
 err_free:
-	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
 err_unlock:
 	mutex_unlock(&p->mutex);
 	return err;
@@ -1338,7 +1345,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
 		goto err_unlock;
 	}
 
-	ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+						(struct kgd_mem *)mem);
 
 	/* If freeing the buffer failed, leave the handle in place for
 	 * clean-up during process tear-down.
@@ -1418,7 +1426,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 			err = PTR_ERR(peer_pdd);
 			goto get_mem_obj_from_handle_failed;
 		}
-		err = peer->kfd2kgd->map_memory_to_gpu(
+		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
 		if (err) {
 			pr_err("Failed to map to gpu %d/%d\n",
@@ -1430,7 +1438,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
 	mutex_unlock(&p->mutex);
 
-	err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
@@ -1525,7 +1533,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			err = -ENODEV;
 			goto get_mem_obj_from_handle_failed;
 		}
-		err = dev->kfd2kgd->unmap_memory_to_gpu(
+		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
 		if (err) {
 			pr_err("Failed to unmap from gpu %d/%d\n",
@@ -1549,6 +1557,115 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 	return err;
 }
 
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+		struct kfd_process *p, void *data)
+{
+	struct kfd_ioctl_get_dmabuf_info_args *args = data;
+	struct kfd_dev *dev = NULL;
+	struct kgd_dev *dma_buf_kgd;
+	void *metadata_buffer = NULL;
+	uint32_t flags;
+	unsigned int i;
+	int r;
+
+	/* Find a KFD GPU device that supports the get_dmabuf_info query */
+	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+		if (dev)
+			break;
+	if (!dev)
+		return -EINVAL;
+
+	if (args->metadata_ptr) {
+		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+		if (!metadata_buffer)
+			return -ENOMEM;
+	}
+
+	/* Get dmabuf info from KGD */
+	r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+					  &dma_buf_kgd, &args->size,
+					  metadata_buffer, args->metadata_size,
+					  &args->metadata_size, &flags);
+	if (r)
+		goto exit;
+
+	/* Reverse-lookup gpu_id from kgd pointer */
+	dev = kfd_device_by_kgd(dma_buf_kgd);
+	if (!dev) {
+		r = -EINVAL;
+		goto exit;
+	}
+	args->gpu_id = dev->id;
+	args->flags = flags;
+
+	/* Copy metadata buffer to user mode */
+	if (metadata_buffer) {
+		r = copy_to_user((void __user *)args->metadata_ptr,
+				 metadata_buffer, args->metadata_size);
+		if (r != 0)
+			r = -EFAULT;
+	}
+
+exit:
+	kfree(metadata_buffer);
+
+	return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+				   struct kfd_process *p, void *data)
+{
+	struct kfd_ioctl_import_dmabuf_args *args = data;
+	struct kfd_process_device *pdd;
+	struct dma_buf *dmabuf;
+	struct kfd_dev *dev;
+	int idr_handle;
+	uint64_t size;
+	void *mem;
+	int r;
+
+	dev = kfd_device_by_id(args->gpu_id);
+	if (!dev)
+		return -EINVAL;
+
+	dmabuf = dma_buf_get(args->dmabuf_fd);
+	if (!dmabuf)
+		return -EINVAL;
+
+	mutex_lock(&p->mutex);
+
+	pdd = kfd_bind_process_to_device(dev, p);
+	if (IS_ERR(pdd)) {
+		r = PTR_ERR(pdd);
+		goto err_unlock;
+	}
+
+	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+					      args->va_addr, pdd->vm,
+					      (struct kgd_mem **)&mem, &size,
+					      NULL);
+	if (r)
+		goto err_unlock;
+
+	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+	if (idr_handle < 0) {
+		r = -EFAULT;
+		goto err_free;
+	}
+
+	mutex_unlock(&p->mutex);
+
+	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+	return 0;
+
+err_free:
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+	mutex_unlock(&p->mutex);
+	return r;
+}
+
 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
 	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
 			    .cmd_drv = 0, .name = #ioctl}
@@ -1634,7 +1751,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
 			kfd_ioctl_set_cu_mask, 0),
 
 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
-			kfd_ioctl_get_queue_wave_state, 0)
+			kfd_ioctl_get_queue_wave_state, 0),
+
+	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+				kfd_ioctl_get_dmabuf_info, 0),
+
+	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+				kfd_ioctl_import_dmabuf, 0),
 
 };
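
The two ioctls registered above give user space DMA-buf interop: query an fd's properties, then import it into the process's GPU VM. A hypothetical user-space sketch against the uapi header; the field names follow the args structs handled above:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int import_dmabuf(int kfd_fd, uint32_t gpu_id, int dmabuf_fd,
				 uint64_t va, uint64_t *handle)
	{
		struct kfd_ioctl_import_dmabuf_args args = {0};

		args.gpu_id = gpu_id;		/* target GPU, from the KFD topology */
		args.dmabuf_fd = dmabuf_fd;	/* fd exported by the producing driver */
		args.va_addr = va;		/* GPU VA at which the BO should appear */

		if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args))
			return -1;

		*handle = args.handle;		/* used later to map or free the BO */
		return 0;
	}
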
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 56412b0..c02adbb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,6 +26,7 @@
 #include "kfd_priv.h"
 #include "kfd_topology.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
  * GPU processor ID are expressed with Bit[31]=1.
@@ -132,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
 #define fiji_cache_info  carrizo_cache_info
 #define polaris10_cache_info carrizo_cache_info
 #define polaris11_cache_info carrizo_cache_info
+#define polaris12_cache_info carrizo_cache_info
 /* TODO - check & update Vega10 cache details */
 #define vega10_cache_info carrizo_cache_info
 #define raven_cache_info carrizo_cache_info
@@ -646,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		pcache_info = polaris11_cache_info;
 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
 		break;
+	case CHIP_POLARIS12:
+		pcache_info = polaris12_cache_info;
+		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		pcache_info = vega10_cache_info;
 		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
@@ -753,12 +760,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 		return -ENODATA;
 	}
 
-	pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+	pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
 	if (!pcrat_image)
 		return -ENOMEM;
 
-	memcpy(pcrat_image, crat_table, crat_table->length);
-
 	*crat_image = pcrat_image;
 	*size = crat_table->length;
 
@@ -1161,7 +1166,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
 	cu->proximity_domain = proximity_domain;
 
-	kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
 	cu->num_simd_per_cu = cu_info.simd_per_cu;
 	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
 	cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -1192,7 +1197,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	 * report the total FB size (public+private) as a single
 	 * private heap.
 	 */
-	kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 			sub_type_hdr->length);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index e4ded89..8be9677 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -28,6 +28,7 @@
 #include "kfd_pm4_headers_vi.h"
 #include "cwsr_trap_handler.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -204,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = {
 	.num_sdma_queues_per_engine = 2,
 };
 
+static const struct kfd_device_info polaris12_device_info = {
+	.asic_family = CHIP_POLARIS12,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
+	.ih_ring_entry_size = 4 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_cik,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = true,
+	.num_sdma_engines = 2,
+	.num_sdma_queues_per_engine = 2,
+};
+
 static const struct kfd_device_info vega10_device_info = {
 	.asic_family = CHIP_VEGA10,
 	.max_pasid_bits = 16,
@@ -236,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
 	.num_sdma_queues_per_engine = 2,
 };
 
+static const struct kfd_device_info vega12_device_info = {
+	.asic_family = CHIP_VEGA12,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 8,
+	.ih_ring_entry_size = 8 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_v9,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = false,
+	.num_sdma_engines = 2,
+	.num_sdma_queues_per_engine = 2,
+};
+
 static const struct kfd_device_info vega20_device_info = {
 	.asic_family = CHIP_VEGA20,
 	.max_pasid_bits = 16,
@@ -330,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
+	{ 0x6980, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6981, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6985, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6986, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6987, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6995, &polaris12_device_info },	/* Polaris12 */
+	{ 0x6997, &polaris12_device_info },	/* Polaris12 */
+	{ 0x699F, &polaris12_device_info },	/* Polaris12 */
 	{ 0x6860, &vega10_device_info },	/* Vega10 */
 	{ 0x6861, &vega10_device_info },	/* Vega10 */
 	{ 0x6862, &vega10_device_info },	/* Vega10 */
@@ -345,6 +386,11 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x686E, &vega10_device_info },	/* Vega10 */
 	{ 0x686F, &vega10_device_info },	/* Vega10 */
 	{ 0x687F, &vega10_device_info },	/* Vega10 */
+	{ 0x69A0, &vega12_device_info },	/* Vega12 */
+	{ 0x69A1, &vega12_device_info },	/* Vega12 */
+	{ 0x69A2, &vega12_device_info },	/* Vega12 */
+	{ 0x69A3, &vega12_device_info },	/* Vega12 */
+	{ 0x69AF, &vega12_device_info },	/* Vega12 */
 	{ 0x66a0, &vega20_device_info },	/* Vega20 */
 	{ 0x66a1, &vega20_device_info },	/* Vega20 */
 	{ 0x66a2, &vega20_device_info },	/* Vega20 */
@@ -485,7 +531,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	/* add another 512KB for all other allocations on gart (HPD, fences) */
 	size += 512 * 1024;
 
-	if (kfd->kfd2kgd->init_gtt_mem_allocation(
+	if (amdgpu_amdkfd_alloc_gtt_mem(
 			kfd->kgd, size, &kfd->gtt_mem,
 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
 			false)) {
@@ -559,7 +605,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 kfd_doorbell_error:
 	kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
-	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+	amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
 		"device %x:%x NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
@@ -576,7 +622,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 		kfd_topology_remove_device(kfd);
 		kfd_doorbell_fini(kfd);
 		kfd_gtt_sa_fini(kfd);
-		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	}
 
 	kfree(kfd);
@@ -688,6 +734,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
 	bool is_patched = false;
+	unsigned long flags;
 
 	if (!kfd->init_complete)
 		return;
@@ -697,7 +744,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 		return;
 	}
 
-	spin_lock(&kfd->interrupt_lock);
+	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 
 	if (kfd->interrupts_active
 	    && interrupt_is_wanted(kfd, ih_ring_entry,
@@ -706,7 +753,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 				     is_patched ? patched_ihre : ih_ring_entry))
 		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
-	spin_unlock(&kfd->interrupt_lock);
+	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm)
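
The interrupt_lock change from spin_lock() to spin_lock_irqsave() suggests kgd2kfd_interrupt() can now run in contexts where interrupts are already disabled, or can race with an interrupt-context taker of the same lock; saving and restoring the IRQ flags makes the critical section safe either way. The pattern in miniature:

	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);	/* safe from any context */
	/* enqueue the interrupt work while holding the lock */
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
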
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index a3b9339..8372556 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -33,6 +33,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
 
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2 11
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
 	if (ret)
 		return ret;
 
-	return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
 				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
 				pmf->release_mem_size / sizeof(uint32_t));
 }
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm,
 
 	pdd = qpd_to_pdd(qpd);
 	/* Retrieve PD base */
-	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 
 	dqm_lock(dqm);
 	list_add(&n->list, &dqm->queues);
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm,
 	retval = dqm->asic_ops.update_qpd(dqm, qpd);
 
 	if (dqm->processes_count++ == 0)
-		dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+		amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
 
 	dqm_unlock(dqm);
 
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm,
 			list_del(&cur->list);
 			kfree(cur);
 			if (--dqm->processes_count == 0)
-				dqm->dev->kfd2kgd->set_compute_idle(
+				amdgpu_amdkfd_set_compute_idle(
 					dqm->dev->kgd, true);
 			goto out;
 		}
@@ -845,15 +846,8 @@ static int
 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 			unsigned int vmid)
 {
-	uint32_t pasid_mapping;
-
-	pasid_mapping = (pasid == 0) ? 0 :
-		(uint32_t)pasid |
-		ATC_VMID_PASID_MAPPING_VALID;
-
 	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
-						dqm->dev->kgd, pasid_mapping,
-						vmid);
+						dqm->dev->kgd, pasid, vmid);
 }
 
 static void init_interrupts(struct device_queue_manager *dqm)
@@ -1553,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
 			  u32 *ctl_stack_used_size,
 			  u32 *save_area_used_size)
 {
-	struct mqd_manager *mqd;
+	struct mqd_manager *mqd_mgr;
 	int r;
 
 	dqm_lock(dqm);
@@ -1564,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
 		goto dqm_unlock;
 	}
 
-	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-	if (!mqd) {
+	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+	if (!mqd_mgr) {
 		r = -ENOMEM;
 		goto dqm_unlock;
 	}
 
-	if (!mqd->get_wave_state) {
+	if (!mqd_mgr->get_wave_state) {
 		r = -EINVAL;
 		goto dqm_unlock;
 	}
 
-	r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
-				save_area_used_size);
+	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+			ctl_stack_used_size, save_area_used_size);
 
 dqm_unlock:
 	dqm_unlock(dqm);
@@ -1747,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
 		break;
 
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		device_queue_manager_init_v9(&dqm->asic_ops);
@@ -1796,7 +1792,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
 {
 	struct device_queue_manager *dqm = container_of(work,
 			struct device_queue_manager, hw_exception_work);
-	dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
 }
 
 #if defined(CONFIG_DEBUG_FS)
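
set_pasid_vmid_mapping() above stops composing the ATC mapping word itself (the matching ATC_VMID_PASID_MAPPING_VALID #define was removed from cik_regs.h earlier), so the valid bit is presumably applied on the amdgpu side of the kfd2kgd hook. A hypothetical kgd-side fragment mirroring the logic deleted here:

	/* hypothetical: compose and program the mapping inside the kgd hook */
	uint32_t pasid_mapping = pasid ?
		(uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID : 0;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
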
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index fd60a11..c3a5dcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -24,7 +24,6 @@
 #include "kfd_device_queue_manager.h"
 #include "gca/gfx_8_0_enum.h"
 #include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
 #include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 3d66cec..213ea54 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process)
 			case CHIP_FIJI:
 			case CHIP_POLARIS10:
 			case CHIP_POLARIS11:
+			case CHIP_POLARIS12:
 				kfd_init_apertures_vi(pdd, id);
 				break;
 			case CHIP_VEGA10:
+			case CHIP_VEGA12:
 			case CHIP_VEGA20:
 			case CHIP_RAVEN:
 				kfd_init_apertures_v9(pdd, id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f836897..a85904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,7 +23,7 @@
 #include "kfd_priv.h"
 #include "kfd_events.h"
 #include "soc15_int.h"
-
+#include "kfd_device_queue_manager.h"
 
 static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry,
@@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 	    vmid > dev->vm_info.last_vmid_kfd)
 		return 0;
 
-	/* If there is no valid PASID, it's likely a firmware bug */
-	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
-	if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
-		return 0;
-
 	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
 	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
 
-	pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
-		 client_id, source_id, pasid);
+	/* This is a known issue for gfx9. Under non-HWS, the pasid is not set
+	 * in the interrupt payload, so we need to find out the pasid on our
+	 * own.
+	 */
+	if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		const uint32_t pasid_mask = 0xffff;
+
+		*patched_flag = true;
+		memcpy(patched_ihre, ih_ring_entry,
+				dev->device_info->ih_ring_entry_size);
+
+		pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
+				dev->kgd, vmid);
+
+		/* Patch the pasid field */
+		patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
+					& ~pasid_mask) | pasid);
+	}
+
+	pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+		 client_id, source_id, vmid, pasid);
 	pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
 		 data[0], data[1], data[2], data[3],
 		 data[4], data[5], data[6], data[7]);
 
+	/* If there is no valid PASID, it's likely a bug */
+	if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+		return 0;
+
 	/* Interrupt types we care about: various signals and faults.
 	 * They will be forwarded to a work queue (see below).
 	 */
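
Since the workaround patches the looked-up PASID into the low 16 bits of dword 3 of the copied entry, SOC15_PASID_FROM_IH_ENTRY() must be reading it from there. A plausible decode, inferred from the masking above (the real macro lives in soc15_int.h):

	#define SOC15_PASID_FROM_IH_ENTRY(entry) \
		(le32_to_cpu(((const uint32_t *)(entry))[3]) & 0xffff)
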
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 6c31f73..f159688 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		kernel_queue_init_vi(&kq->ops_asic_specific);
 		break;
 
@@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 		break;
 
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		kernel_queue_init_v9(&kq->ops_asic_specific);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index e33019a..aed9b9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -22,6 +22,7 @@
  */
 
 #include "kfd_mqd_manager.h"
+#include "amdgpu_amdkfd.h"
 
 struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 					struct kfd_dev *dev)
@@ -37,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		return mqd_manager_init_vi_tonga(type, dev);
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		return mqd_manager_init_v9(type, dev);
@@ -58,7 +61,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 	uint32_t cu_per_sh[4] = {0};
 	int i, se, cu = 0;
 
-	mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
 
 	if (cu_mask_count > cu_info.cu_active_number)
 		cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f381c1c..9dbba60 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -30,6 +30,7 @@
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
 #include "sdma0/sdma0_4_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
 
 static inline struct v9_mqd *get_mqd(void *mqd)
 {
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 		*mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
 		if (!*mqd_mem_obj)
 			return -ENOMEM;
-		retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
 			ALIGN(q->ctl_stack_size, PAGE_SIZE) +
 				ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
 			&((*mqd_mem_obj)->gtt_mem),
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
 	struct kfd_dev *kfd = mm->dev;
 
 	if (mqd_mem_obj->gtt_mem) {
-		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
 		kfree(mqd_mem_obj);
 	} else {
 		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c6080ed3..045a229 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		pm->pmf = &kfd_vi_pm_funcs;
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 15fff44..33b08ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -22,6 +22,7 @@
 
 #include <linux/types.h>
 #include "kfd_priv.h"
+#include "amdgpu_ids.h"
 
 static unsigned int pasid_bits = 16;
 static const struct kfd2kgd_calls *kfd2kgd;
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void)
 			return false;
 	}
 
-	r = kfd2kgd->alloc_pasid(pasid_bits);
+	r = amdgpu_pasid_alloc(pasid_bits);
 
 	return r > 0 ? r : 0;
 }
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void)
 void kfd_pasid_free(unsigned int pasid)
 {
 	if (kfd2kgd)
-		kfd2kgd->free_pasid(pasid);
+		amdgpu_pasid_free(pasid);
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 53ff86d..0689d4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -507,6 +507,7 @@ struct qcm_process_device {
 	 * All the memory management data should be here too
 	 */
 	uint64_t gds_context_area;
+	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
 	uint64_t page_table_base;
 	uint32_t sh_mem_config;
 	uint32_t sh_mem_bases;
@@ -792,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
 struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
 int kfd_numa_node_to_apic_id(int numa_node_id);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 0039e45..80b36e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/mman.h>
 #include <linux/file.h>
+#include "amdgpu_amdkfd.h"
 
 struct mm_struct;
 
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
 {
 	struct kfd_dev *dev = pdd->dev;
 
-	dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
-	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
 }
 
 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	int handle;
 	int err;
 
-	err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
 						 pdd->vm, &mem, NULL, flags);
 	if (err)
 		goto err_alloc_mem;
 
-	err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
 	if (err)
 		goto err_map_mem;
 
-	err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	}
 
 	if (kptr) {
-		err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
 				(struct kgd_mem *)mem, kptr, NULL);
 		if (err) {
 			pr_debug("Map GTT BO to kernel failed\n");
@@ -165,7 +166,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	return err;
 
 err_map_mem:
-	kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
 err_alloc_mem:
 	*kptr = NULL;
 	return err;
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 				    per_device_list) {
 			if (!peer_pdd->vm)
 				continue;
-			peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 				peer_pdd->dev->kgd, mem, peer_pdd->vm);
 		}
 
-		pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
 		kfd_process_device_remove_obj_handle(pdd, id);
 	}
 }
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
 				pdd->dev->id, p->pasid);
 
 		if (pdd->drm_file) {
-			pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+			amdgpu_amdkfd_gpuvm_release_process_vm(
+					pdd->dev->kgd, pdd->vm);
 			fput(pdd->drm_file);
 		}
 		else if (pdd->vm)
-			pdd->dev->kfd2kgd->destroy_process_vm(
+			amdgpu_amdkfd_gpuvm_destroy_process_vm(
 				pdd->dev->kgd, pdd->vm);
 
 		list_del(&pdd->per_device_list);
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 	dev = pdd->dev;
 
 	if (drm_file)
-		ret = dev->kfd2kgd->acquire_process_vm(
+		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
 			dev->kgd, drm_file, p->pasid,
 			&pdd->vm, &p->kgd_process_info, &p->ef);
 	else
-		ret = dev->kfd2kgd->create_process_vm(
-			dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
+			&pdd->vm, &p->kgd_process_info, &p->ef);
 	if (ret) {
 		pr_err("Failed to create process VM object\n");
 		return ret;
@@ -714,7 +716,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 err_reserve_ib_mem:
 	kfd_process_device_free_bos(pdd);
 	if (!drm_file)
-		dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
 	pdd->vm = NULL;
 
 	return ret;
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work)
 	 */
 
 	p->last_restore_timestamp = get_jiffies_64();
-	ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 						     &p->ef);
 	if (ret) {
 		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e3843c5..5f5b2ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,6 +36,7 @@
 #include "kfd_topology.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
 
 /* topology_device_list - Master list of all topology devices */
 static struct list_head topology_device_list;
@@ -100,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
 	down_read(&topology_lock);
 
 	list_for_each_entry(top_dev, &topology_device_list, list)
-		if (top_dev->gpu->pdev == pdev) {
+		if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+			device = top_dev->gpu;
+			break;
+		}
+
+	up_read(&topology_lock);
+
+	return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+	struct kfd_topology_device *top_dev;
+	struct kfd_dev *device = NULL;
+
+	down_read(&topology_lock);
+
+	list_for_each_entry(top_dev, &topology_device_list, list)
+		if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
 			device = top_dev->gpu;
 			break;
 		}
@@ -1052,7 +1071,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 	if (!gpu)
 		return 0;
 
-	gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
 
 	local_mem_size = local_mem_info.local_mem_size_private +
 			local_mem_info.local_mem_size_public;
@@ -1118,8 +1137,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
 	 * for APUs - If CRAT from ACPI reports more than one bank, then
 	 *	all the banks will report the same mem_clk_max information
 	 */
-	dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
-		&local_mem_info);
+	amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
 
 	list_for_each_entry(mem, &dev->mem_props, list)
 		mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1240,7 +1258,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * needed for the topology
 	 */
 
-	dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+	amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
 	dev->node_props.simd_arrays_per_engine =
 		cu_info.num_shader_arrays_per_engine;
 
@@ -1249,7 +1267,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
 		gpu->pdev->devfn);
 	dev->node_props.max_engine_clk_fcompute =
-		dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
 	dev->node_props.max_engine_clk_ccompute =
 		cpufreq_quick_get_max(0) / 1000;
 	dev->node_props.drm_render_minor =
@@ -1272,12 +1290,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		pr_debug("Adding doorbell packet type capability\n");
 		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c97dc96..cfde156 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
 
 #TODO: remove when Timing Sync feature is complete
 subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
 
-DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet
+DAL_LIBS = amdgpu_dm dc	modules/freesync modules/color modules/info_packet modules/power
 
 AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5a6edf6..d013159 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -23,6 +23,9 @@
  *
  */
 
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
 #include "dm_services_types.h"
 #include "dc.h"
 #include "dc/inc/core_types.h"
@@ -38,7 +41,6 @@
 #include "amd_shared.h"
 #include "amdgpu_dm_irq.h"
 #include "dm_helpers.h"
-#include "dm_services_types.h"
 #include "amdgpu_dm_mst_types.h"
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
@@ -55,6 +57,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -72,10 +75,22 @@
 #endif
 
 #include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
 
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
 /* basic init/fini API */
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@ -95,7 +110,7 @@ static void
 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-				struct amdgpu_plane *aplane,
+				struct drm_plane *plane,
 				unsigned long possible_crtcs);
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       struct drm_plane *plane,
@@ -119,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state);
 
+static void handle_cursor_update(struct drm_plane *plane,
+				 struct drm_plane_state *old_plane_state);
 
 
 
@@ -379,11 +396,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 
 }
 
-/*
- * Init display KMS
- *
- * Returns 0 on success
- */
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -393,6 +405,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	/* Zero all the fields */
 	memset(&init_data, 0, sizeof(init_data));
 
+	mutex_init(&adev->dm.dc_lock);
+
 	if(amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
 		goto error;
@@ -507,6 +521,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	/* DC Destroy TODO: Replace destroy DAL */
 	if (adev->dm.dc)
 		dc_destroy(&adev->dm.dc);
+
+	mutex_destroy(&adev->dm.dc_lock);
+
 	return;
 }
 
@@ -638,6 +655,26 @@ static int dm_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	struct dmcu_iram_parameters params;
+	unsigned int linear_lut[16];
+	int i;
+	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+	bool ret;
+
+	for (i = 0; i < 16; i++)
+		linear_lut[i] = 0xFFFF * i / 15;
+
+	params.set = 0;
+	params.backlight_ramping_start = 0xCCCC;
+	params.backlight_ramping_reduction = 0xCCCCCCCC;
+	params.backlight_lut_array_size = 16;
+	params.backlight_lut_array = linear_lut;
+
+	ret = dmcu_load_iram(dmcu, params);
+
+	if (!ret)
+		return -EINVAL;
+
 	return detect_mst_link_for_all_connectors(adev->ddev);
 }
 
@@ -663,6 +700,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
 static int dm_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -673,6 +730,14 @@ static int dm_hw_init(void *handle)
 	return 0;
 }
 
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
 static int dm_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -898,6 +963,16 @@ static int dm_resume(void *handle)
 	return ret;
 }
 
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
 static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.name = "dm",
 	.early_init = dm_early_init,
@@ -926,53 +1001,17 @@ const struct amdgpu_ip_block_version dm_ip_block =
 };
 
 
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state)
-		return NULL;
-
-	if (drm_atomic_state_init(dev, &state->base) < 0)
-		goto fail;
-
-	return &state->base;
-
-fail:
-	kfree(state);
-	return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
-	if (dm_state->context) {
-		dc_release_state(dm_state->context);
-		dm_state->context = NULL;
-	}
-
-	drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-	drm_atomic_state_default_release(state);
-	kfree(dm_state);
-}
+/**
+ * DOC: atomic
+ *
+ * DM tracks the global atomic state through a DRM private object: &struct
+ * dm_atomic_state wraps the &struct dc_state that DC validates and commits.
+ * It is acquired during atomic check via dm_atomic_get_state(), and its
+ * lifetime is managed by the private object duplicate/destroy helpers below.
+ */
 
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.fb_create = amdgpu_display_user_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
 	.atomic_commit = amdgpu_dm_atomic_commit,
-	.atomic_state_alloc = dm_atomic_state_alloc,
-	.atomic_state_clear = dm_atomic_state_clear,
-	.atomic_state_free = dm_atomic_state_alloc_free
 };
 
 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1494,8 +1533,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 }
 #endif
 
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+			       struct dm_atomic_state **dm_state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_state *priv_state;
+	int ret;
+
+	if (*dm_state)
+		return 0;
+
+	ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+	if (ret)
+		return ret;
+
+	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+	if (IS_ERR(priv_state))
+		return PTR_ERR(priv_state);
+
+	*dm_state = to_dm_atomic_state(priv_state);
+
+	return 0;
+}
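+
+/*
+ * Illustrative usage (a sketch only; see the callers in this file for the
+ * real thing):
+ *
+ *	struct dm_atomic_state *dm_state = NULL;
+ *	int ret;
+ *
+ *	ret = dm_atomic_get_state(state, &dm_state);
+ *	if (ret)
+ *		return ret;
+ *
+ * On success, dm_state->context points at the dc_state for this commit.
+ */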
+
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *new_obj_state;
+	int i;
+
+	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(new_obj_state);
+	}
+
+	return NULL;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *old_obj_state;
+	int i;
+
+	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(old_obj_state);
+	}
+
+	return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+	struct dm_atomic_state *old_state, *new_state;
+
+	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+	if (!new_state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+	new_state->context = dc_create_state();
+	if (!new_state->context) {
+		kfree(new_state);
+		return NULL;
+	}
+
+	old_state = to_dm_atomic_state(obj->state);
+	if (old_state && old_state->context)
+		dc_resource_state_copy_construct(old_state->context,
+						 new_state->context);
+
+	return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+				    struct drm_private_state *state)
+{
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+	if (dm_state && dm_state->context)
+		dc_release_state(dm_state->context);
+
+	kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+	.atomic_duplicate_state = dm_atomic_duplicate_state,
+	.atomic_destroy_state = dm_atomic_destroy_state,
+};
+
 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 {
+	struct dm_atomic_state *state;
 	int r;
 
 	adev->mode_info.mode_config_initialized = true;
@@ -1513,6 +1661,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
+	drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->context = dc_create_state();
+	if (!state->context) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+	drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+				    &state->base,
+				    &dm_atomic_state_funcs);
+
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -1520,15 +1686,63 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+{
+#if defined(CONFIG_ACPI)
+	struct amdgpu_dm_backlight_caps caps;
+
+	if (dm->backlight_caps.caps_valid)
+		return;
+
+	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+	if (caps.caps_valid) {
+		dm->backlight_caps.min_input_signal = caps.min_input_signal;
+		dm->backlight_caps.max_input_signal = caps.max_input_signal;
+		dm->backlight_caps.caps_valid = true;
+	} else {
+		dm->backlight_caps.min_input_signal =
+				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+		dm->backlight_caps.max_input_signal =
+				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+	}
+#else
+	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
+	struct amdgpu_dm_backlight_caps caps;
+	uint32_t brightness = bd->props.brightness;
+
+	amdgpu_dm_update_backlight_caps(dm);
+	caps = dm->backlight_caps;
+	/*
+	 * The brightness input is in the range 0-255
+	 * It needs to be rescaled to be between the
+	 * requested min and max input signal
+	 *
+	 * It also needs to be scaled up by 0x101 to
+	 * match the DC interface which has a range of
+	 * 0 to 0xffff
+	 */
+	brightness =
+		brightness
+		* 0x101
+		* (caps.max_input_signal - caps.min_input_signal)
+		/ AMDGPU_MAX_BL_LEVEL
+		+ caps.min_input_signal * 0x101;
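+	/*
+	 * Worked example (illustrative, using the default caps of
+	 * min_input_signal = 12 and max_input_signal = 255):
+	 *
+	 *   brightness = 255: 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101
+	 *                     = 62451 + 3084 = 65535 (0xFFFF, the DC maximum)
+	 *   brightness = 0:   0 + 12 * 0x101 = 3084 (the scaled minimum)
+	 */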
 
 	if (dc_link_set_backlight_level(dm->backlight_link,
-			bd->props.brightness, 0, 0))
+			brightness, 0, 0))
 		return 0;
 	else
 		return 1;
@@ -1555,6 +1769,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 	char bl_name[16];
 	struct backlight_properties props = { 0 };
 
+	amdgpu_dm_update_backlight_caps(dm);
+
 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
 	props.brightness = AMDGPU_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
@@ -1580,18 +1796,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
 			     struct amdgpu_mode_info *mode_info,
 			     int plane_id)
 {
-	struct amdgpu_plane *plane;
+	struct drm_plane *plane;
 	unsigned long possible_crtcs;
 	int ret = 0;
 
-	plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
 	mode_info->planes[plane_id] = plane;
 
 	if (!plane) {
 		DRM_ERROR("KMS: Failed to allocate plane\n");
 		return -ENOMEM;
 	}
-	plane->base.type = mode_info->plane_type[plane_id];
+	plane->type = mode_info->plane_type[plane_id];
 
 	/*
 	 * HACK: IGT tests expect that each plane can only have
@@ -1682,7 +1898,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	}
 
 	for (i = 0; i < dm->dc->caps.max_streams; i++)
-		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
 			DRM_ERROR("KMS: Failed to initialize crtc\n");
 			goto fail;
 		}
@@ -1786,6 +2002,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_mode_config_cleanup(dm->ddev);
+	drm_atomic_private_obj_fini(&dm->atomic_obj);
 	return;
 }
 
@@ -1805,73 +2022,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
 	/* TODO: implement later */
 }
 
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
-				struct drm_file *filp)
-{
-	struct drm_atomic_state *state;
-	struct drm_modeset_acquire_ctx ctx;
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	struct drm_connector_state *old_con_state, *new_con_state;
-	int ret = 0;
-	uint8_t i;
-	bool enable = false;
-
-	drm_modeset_acquire_init(&ctx, 0);
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	state->acquire_ctx = &ctx;
-
-retry:
-	drm_for_each_crtc(crtc, dev) {
-		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
-			goto fail;
-
-		/* TODO rework amdgpu_dm_commit_planes so we don't need this */
-		ret = drm_atomic_add_affected_planes(state, crtc);
-		if (ret)
-			goto fail;
-	}
-
-	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
-		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
-		struct drm_crtc_state *new_crtc_state;
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-		struct dm_crtc_state *dm_new_crtc_state;
-
-		if (!acrtc) {
-			ASSERT(0);
-			continue;
-		}
-
-		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
-		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
-		dm_new_crtc_state->freesync_enabled = enable;
-	}
-
-	ret = drm_atomic_commit(state);
-
-fail:
-	if (ret == -EDEADLK) {
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-
-	drm_atomic_state_put(state);
-
-out:
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-	return ret;
-}
-
 static const struct amdgpu_display_funcs dm_display_funcs = {
 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
@@ -1884,8 +2034,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
 		dm_crtc_get_scanoutpos,/* called unconditionally */
 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
-	.notify_freesync = amdgpu_notify_freesync,
-
 };
 
 #if defined(CONFIG_DEBUG_KERNEL_DC)
@@ -2486,7 +2634,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
 static void
 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 					     const struct drm_display_mode *mode_in,
-					     const struct drm_connector *connector)
+					     const struct drm_connector *connector,
+					     const struct dc_stream_state *old_stream)
 {
 	struct dc_crtc_timing *timing_out = &stream->timing;
 	const struct drm_display_info *info = &connector->display_info;
@@ -2512,7 +2661,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 			connector);
 	timing_out->scan_type = SCANNING_TYPE_NODATA;
 	timing_out->hdmi_vic = 0;
-	timing_out->vic = drm_match_cea_mode(mode_in);
+
+	if (old_stream) {
+		timing_out->vic = old_stream->timing.vic;
+		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+	} else {
+		timing_out->vic = drm_match_cea_mode(mode_in);
+		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+	}
 
 	timing_out->h_addressable = mode_in->crtc_hdisplay;
 	timing_out->h_total = mode_in->crtc_htotal;
@@ -2528,10 +2688,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
 	timing_out->pix_clk_khz = mode_in->crtc_clock;
 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
-	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
-		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
-	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
-		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
 
 	stream->output_color_space = get_output_color_space(timing_out);
 
@@ -2694,13 +2850,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		       const struct drm_display_mode *drm_mode,
-		       const struct dm_connector_state *dm_state)
+		       const struct dm_connector_state *dm_state,
+		       const struct dc_stream_state *old_stream)
 {
 	struct drm_display_mode *preferred_mode = NULL;
 	struct drm_connector *drm_connector;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
+	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+	int mode_refresh;
+	int preferred_refresh = 0;
+
 	struct dc_sink *sink = NULL;
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
@@ -2739,6 +2900,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 				struct drm_display_mode,
 				head);
 
+	mode_refresh = drm_mode_vrefresh(&mode);
+
 	if (preferred_mode == NULL) {
 		/*
 		 * This may not be an error, the use case is when we have no
@@ -2751,13 +2914,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		decide_crtc_timing_for_drm_display_mode(
 				&mode, preferred_mode,
 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
+		preferred_refresh = drm_mode_vrefresh(preferred_mode);
 	}
 
 	if (!dm_state)
 		drm_mode_set_crtcinfo(&mode, 0);
 
-	fill_stream_properties_from_drm_display_mode(stream,
-			&mode, &aconnector->base);
+	/*
+	 * If scaling is enabled and the refresh rate didn't change,
+	 * copy the VIC and sync polarities from the old timings.
+	 */
+	if (!scale || mode_refresh != preferred_refresh)
+		fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base, NULL);
+	else
+		fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base, old_stream);
+
 	update_stream_scaling_settings(&mode, dm_state, stream);
 
 	fill_audio_info(
@@ -2769,6 +2942,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (dm_state && dm_state->freesync_capable)
 		stream->ignore_msa_timing_param = true;
+
 finish:
 	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
 		dc_sink_release(sink);
@@ -2837,7 +3011,10 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 
 	state->adjust = cur->adjust;
 	state->vrr_infopacket = cur->vrr_infopacket;
-	state->freesync_enabled = cur->freesync_enabled;
+	state->abm_level = cur->abm_level;
+	state->vrr_supported = cur->vrr_supported;
+	state->freesync_config = cur->freesync_config;
+	state->crc_enabled = cur->crc_enabled;
 
 	/* TODO: Duplicate dc_stream after the stream object is flattened */
 
@@ -2953,6 +3130,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
 	} else if (property == adev->mode_info.max_bpc_property) {
 		dm_new_state->max_bpc = val;
 		ret = 0;
+	} else if (property == adev->mode_info.abm_level_property) {
+		dm_new_state->abm_level = val;
+		ret = 0;
 	}
 
 	return ret;
@@ -2998,7 +3178,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
 	} else if (property == adev->mode_info.max_bpc_property) {
 		*val = dm_state->max_bpc;
 		ret = 0;
+	} else if (property == adev->mode_info.abm_level_property) {
+		*val = dm_state->abm_level;
+		ret = 0;
 	}
+
 	return ret;
 }
 
@@ -3063,7 +3247,11 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
 
 	new_state->freesync_capable = state->freesync_capable;
-	new_state->freesync_enable = state->freesync_enable;
+	new_state->abm_level = state->abm_level;
+	new_state->scaling = state->scaling;
+	new_state->underscan_enable = state->underscan_enable;
+	new_state->underscan_hborder = state->underscan_hborder;
+	new_state->underscan_vborder = state->underscan_vborder;
 	new_state->max_bpc = state->max_bpc;
 
 	return &new_state->base;
@@ -3166,7 +3354,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
 		goto fail;
 	}
 
-	stream = create_stream_for_sink(aconnector, mode, NULL);
+	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3200,7 +3388,6 @@ amdgpu_dm_connector_helper_funcs = {
 	 */
 	.get_modes = get_modes,
 	.mode_valid = amdgpu_dm_connector_mode_valid,
-	.best_encoder = drm_atomic_helper_best_encoder
 };
 
 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -3438,10 +3625,43 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	return -EINVAL;
 }
 
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+				       struct drm_plane_state *new_plane_state)
+{
+	/* Only support async updates on cursor planes. */
+	if (plane->type != DRM_PLANE_TYPE_CURSOR)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+					 struct drm_plane_state *new_state)
+{
+	struct drm_plane_state *old_state =
+		drm_atomic_get_old_plane_state(new_state->state, plane);
+
+	if (plane->state->fb != new_state->fb)
+		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+	plane->state->src_x = new_state->src_x;
+	plane->state->src_y = new_state->src_y;
+	plane->state->src_w = new_state->src_w;
+	plane->state->src_h = new_state->src_h;
+	plane->state->crtc_x = new_state->crtc_x;
+	plane->state->crtc_y = new_state->crtc_y;
+	plane->state->crtc_w = new_state->crtc_w;
+	plane->state->crtc_h = new_state->crtc_h;
+
+	handle_cursor_update(plane, old_state);
+}
+
 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
 	.prepare_fb = dm_plane_helper_prepare_fb,
 	.cleanup_fb = dm_plane_helper_cleanup_fb,
 	.atomic_check = dm_plane_atomic_check,
+	.atomic_async_check = dm_plane_atomic_async_check,
+	.atomic_async_update = dm_plane_atomic_async_update
 };
 
 /*
@@ -3473,49 +3693,49 @@ static const u32 cursor_formats[] = {
 };
 
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-				struct amdgpu_plane *aplane,
+				struct drm_plane *plane,
 				unsigned long possible_crtcs)
 {
 	int res = -EPERM;
 
-	switch (aplane->base.type) {
+	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				rgb_formats,
 				ARRAY_SIZE(rgb_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	case DRM_PLANE_TYPE_OVERLAY:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				yuv_formats,
 				ARRAY_SIZE(yuv_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	case DRM_PLANE_TYPE_CURSOR:
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
-				&aplane->base,
+				plane,
 				possible_crtcs,
 				&dm_plane_funcs,
 				cursor_formats,
 				ARRAY_SIZE(cursor_formats),
-				NULL, aplane->base.type, NULL);
+				NULL, plane->type, NULL);
 		break;
 	}
 
-	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
 
 	/* Create (reset) the plane state */
-	if (aplane->base.funcs->reset)
-		aplane->base.funcs->reset(&aplane->base);
+	if (plane->funcs->reset)
+		plane->funcs->reset(plane);
 
 
 	return res;
@@ -3526,7 +3746,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       uint32_t crtc_index)
 {
 	struct amdgpu_crtc *acrtc = NULL;
-	struct amdgpu_plane *cursor_plane;
+	struct drm_plane *cursor_plane;
 
 	int res = -ENOMEM;
 
@@ -3534,7 +3754,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	if (!cursor_plane)
 		goto fail;
 
-	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
 
 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
@@ -3545,7 +3765,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			dm->ddev,
 			&acrtc->base,
 			plane,
-			&cursor_plane->base,
+			cursor_plane,
 			&amdgpu_dm_crtc_funcs, NULL);
 
 	if (res)
@@ -3603,14 +3823,17 @@ static int to_drm_connector_type(enum signal_type st)
 	}
 }
 
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
 {
-	const struct drm_connector_helper_funcs *helper =
-		connector->helper_private;
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
 
-	encoder = helper->best_encoder(connector);
+	encoder = amdgpu_dm_connector_to_encoder(connector);
 
 	if (encoder == NULL)
 		return;
@@ -3737,14 +3960,12 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
 
 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
 {
-	const struct drm_connector_helper_funcs *helper =
-			connector->helper_private;
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
 	struct drm_encoder *encoder;
 	struct edid *edid = amdgpu_dm_connector->edid;
 
-	encoder = helper->best_encoder(connector);
+	encoder = amdgpu_dm_connector_to_encoder(connector);
 
 	if (!edid || !drm_edid_is_valid(edid)) {
 		amdgpu_dm_connector->num_modes =
@@ -3783,12 +4004,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	case DRM_MODE_CONNECTOR_HDMIA:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 		aconnector->base.ycbcr_420_allowed =
-			link->link_enc->features.ycbcr420_supported ? true : false;
+			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
 		aconnector->base.ycbcr_420_allowed =
-			link->link_enc->features.ycbcr420_supported ? true : false;
+			link->link_enc->features.dp_ycbcr420_supported ? true : false;
 		break;
 	case DRM_MODE_CONNECTOR_DVID:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3814,6 +4035,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				adev->mode_info.max_bpc_property,
 				0);
 
+	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+	    dc_is_dmcu_initialized(adev->dm.dc)) {
+		drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.abm_level_property, 0);
+	}
+
+	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		drm_connector_attach_vrr_capable_property(
+			&aconnector->base);
+	}
 }
 
 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -4118,6 +4350,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 static void handle_cursor_update(struct drm_plane *plane,
 				 struct drm_plane_state *old_plane_state)
 {
+	struct amdgpu_device *adev = plane->dev->dev_private;
 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4142,9 +4375,12 @@ static void handle_cursor_update(struct drm_plane *plane,
 
 	if (!position.enable) {
 		/* turn off cursor */
-		if (crtc_state && crtc_state->stream)
+		if (crtc_state && crtc_state->stream) {
+			mutex_lock(&adev->dm.dc_lock);
 			dc_stream_set_cursor_position(crtc_state->stream,
 						      &position);
+			mutex_unlock(&adev->dm.dc_lock);
+		}
 		return;
 	}
 
@@ -4162,6 +4398,7 @@ static void handle_cursor_update(struct drm_plane *plane,
 	attributes.pitch = attributes.width;
 
 	if (crtc_state->stream) {
+		mutex_lock(&adev->dm.dc_lock);
 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
 							 &attributes))
 			DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4169,6 +4406,7 @@ static void handle_cursor_update(struct drm_plane *plane,
 		if (!dc_stream_set_cursor_position(crtc_state->stream,
 						   &position))
 			DRM_ERROR("DC failed to set cursor position\n");
+		mutex_unlock(&adev->dm.dc_lock);
 	}
 }
 
@@ -4190,6 +4428,91 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
 						 acrtc->crtc_id);
 }
 
+struct dc_stream_status *dc_state_get_stream_status(
+	struct dc_state *state,
+	struct dc_stream_state *stream)
+{
+	uint8_t i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		if (stream == state->streams[i])
+			return &state->stream_status[i];
+	}
+
+	return NULL;
+}
+
+static void update_freesync_state_on_stream(
+	struct amdgpu_display_manager *dm,
+	struct dm_crtc_state *new_crtc_state,
+	struct dc_stream_state *new_stream)
+{
+	struct mod_vrr_params vrr = {0};
+	struct dc_info_packet vrr_infopacket = {0};
+	struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+	if (!new_stream)
+		return;
+
+	/*
+	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
+	 * For now it's sufficient to just guard against these conditions.
+	 */
+
+	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+		return;
+
+	if (new_crtc_state->vrr_supported &&
+	    config.min_refresh_in_uhz &&
+	    config.max_refresh_in_uhz) {
+		config.state = new_crtc_state->base.vrr_enabled ?
+			VRR_STATE_ACTIVE_VARIABLE :
+			VRR_STATE_INACTIVE;
+	} else {
+		config.state = VRR_STATE_UNSUPPORTED;
+	}
+
+	mod_freesync_build_vrr_params(dm->freesync_module,
+				      new_stream,
+				      &config, &vrr);
+
+	mod_freesync_build_vrr_infopacket(
+		dm->freesync_module,
+		new_stream,
+		&vrr,
+		PACKET_TYPE_VRR,
+		TRANSFER_FUNC_UNKNOWN,
+		&vrr_infopacket);
+
+	new_crtc_state->freesync_timing_changed =
+		(memcmp(&new_crtc_state->adjust,
+			&vrr.adjust,
+			sizeof(vrr.adjust)) != 0);
+
+	new_crtc_state->freesync_vrr_info_changed =
+		(memcmp(&new_crtc_state->vrr_infopacket,
+			&vrr_infopacket,
+			sizeof(vrr_infopacket)) != 0);
+
+	new_crtc_state->adjust = vrr.adjust;
+	new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+	new_stream->adjust = new_crtc_state->adjust;
+	new_stream->vrr_infopacket = vrr_infopacket;
+
+	if (new_crtc_state->freesync_vrr_info_changed)
+		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+			      new_crtc_state->base.crtc->base.id,
+			      (int)new_crtc_state->base.vrr_enabled,
+			      (int)vrr.state);
+
+	if (new_crtc_state->freesync_timing_changed)
+		DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+			      new_crtc_state->base.crtc->base.id,
+			      vrr.adjust.v_total_min,
+			      vrr.adjust.v_total_max);
+}
+
 /*
  * Executes flip
  *
@@ -4211,6 +4534,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	struct dc_flip_addrs addr = { {0} };
 	/* TODO eliminate or rename surface_update */
 	struct dc_surface_update surface_updates[1] = { {0} };
+	struct dc_stream_update stream_update = {0};
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 	struct dc_stream_status *stream_status;
 
@@ -4283,13 +4607,30 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	}
 	surface_updates->flip_addr = &addr;
 
+	if (acrtc_state->stream) {
+		update_freesync_state_on_stream(
+			&adev->dm,
+			acrtc_state,
+			acrtc_state->stream);
+
+		if (acrtc_state->freesync_timing_changed)
+			stream_update.adjust =
+				&acrtc_state->stream->adjust;
+
+		if (acrtc_state->freesync_vrr_info_changed)
+			stream_update.vrr_infopacket =
+				&acrtc_state->stream->vrr_infopacket;
+	}
+
+	mutex_lock(&adev->dm.dc_lock);
 	dc_commit_updates_for_stream(adev->dm.dc,
 					     surface_updates,
 					     1,
 					     acrtc_state->stream,
-					     NULL,
+					     &stream_update,
 					     &surface_updates->surface,
 					     state);
+	mutex_unlock(&adev->dm.dc_lock);
 
 	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
 			 __func__,
@@ -4304,6 +4645,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
  * with a dc_plane_state and follow the atomic model a bit more closely here.
  */
 static bool commit_planes_to_stream(
+		struct amdgpu_display_manager *dm,
 		struct dc *dc,
 		struct dc_plane_state **plane_states,
 		uint8_t new_plane_count,
@@ -4320,6 +4662,7 @@ static bool commit_planes_to_stream(
 	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
 	struct dc_stream_update *stream_update =
 			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+	unsigned int abm_level;
 
 	if (!stream_update) {
 		BREAK_TO_DEBUGGER();
@@ -4347,9 +4690,9 @@ static bool commit_planes_to_stream(
 	stream_update->dst = dc_stream->dst;
 	stream_update->out_transfer_func = dc_stream->out_transfer_func;
 
-	if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) {
-		stream_update->vrr_infopacket = &dc_stream->vrr_infopacket;
-		stream_update->adjust = &dc_stream->adjust;
+	if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+		abm_level = dm_new_crtc_state->abm_level;
+		stream_update->abm_level = &abm_level;
 	}
 
 	for (i = 0; i < new_plane_count; i++) {
@@ -4379,11 +4722,13 @@ static bool commit_planes_to_stream(
 		updates[i].scaling_info = &scaling_info[i];
 	}
 
+	mutex_lock(&dm->dc_lock);
 	dc_commit_updates_for_stream(
 			dc,
 			updates,
 			new_plane_count,
 			dc_stream, stream_update, plane_states, state);
+	mutex_unlock(&dm->dc_lock);
 
 	kfree(flip_addr);
 	kfree(plane_info);
@@ -4393,6 +4738,7 @@ static bool commit_planes_to_stream(
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+				    struct dc_state *dc_state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
 				    struct drm_crtc *pcrtc,
@@ -4409,7 +4755,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
 	struct dm_crtc_state *dm_old_crtc_state =
 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	int planes_count = 0;
 	unsigned long flags;
 
@@ -4470,7 +4815,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				crtc,
 				fb,
 				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
-				dm_state->context);
+				dc_state);
 		}
 
 	}
@@ -4487,15 +4832,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 		}
 
-		dc_stream_attach->adjust = acrtc_state->adjust;
-		dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket;
+		dc_stream_attach->abm_level = acrtc_state->abm_level;
 
-		if (false == commit_planes_to_stream(dm->dc,
+		if (false == commit_planes_to_stream(dm,
+							dm->dc,
 							plane_states_constructed,
 							planes_count,
 							acrtc_state,
 							dm_old_crtc_state,
-							dm_state->context))
+							dc_state))
 			dm_error("%s: Failed to attach plane!\n", __func__);
 	} else {
 		/*TODO BUG Here should go disable planes on CRTC. */
@@ -4549,12 +4894,21 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
 	/*TODO Handle EINTR, reenable IRQ*/
 }
 
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct dm_atomic_state *dm_state;
+	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
 	uint32_t i, j;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4567,7 +4921,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
-	dm_state = to_dm_atomic_state(state);
+	dm_state = dm_atomic_get_new_state(state);
+	if (dm_state && dm_state->context) {
+		dc_state = dm_state->context;
+	} else {
+		/* No state changes, retain current state. */
+		dc_state_temp = dc_create_state();
+		ASSERT(dc_state_temp);
+		dc_state = dc_state_temp;
+		dc_resource_state_copy_construct_current(dm->dc, dc_state);
+	}
 
 	/* update changed items */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4640,9 +5003,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	} /* for_each_crtc_in_state() */
 
-	if (dm_state->context) {
-		dm_enable_per_frame_crtc_master_sync(dm_state->context);
-		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+	if (dc_state) {
+		dm_enable_per_frame_crtc_master_sync(dc_state);
+		mutex_lock(&dm->dc_lock);
+		WARN_ON(!dc_commit_state(dm->dc, dc_state));
+		mutex_unlock(&dm->dc_lock);
 	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4655,13 +5020,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 					dc_stream_get_status(dm_new_crtc_state->stream);
 
 			if (!status)
+				status = dc_state_get_stream_status(dc_state,
+								    dm_new_crtc_state->stream);
+
+			if (!status)
 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
 			else
 				acrtc->otg_inst = status->primary_otg_inst;
 		}
 	}
 
-	/* Handle scaling and underscan changes*/
+	/* Handle scaling, underscan, and abm changes*/
 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
@@ -4677,11 +5046,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
 			continue;
 
-		/* Skip anything that is not scaling or underscan changes */
-		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
-			continue;
 
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		/* Skip anything that is not scaling or underscan changes */
+		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+				(dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+			continue;
 
 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -4693,17 +5065,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		WARN_ON(!status);
 		WARN_ON(!status->plane_count);
 
-		dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust;
-		dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket;
+		dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
 
 		/*TODO How it works with MPO ?*/
 		if (!commit_planes_to_stream(
+				dm,
 				dm->dc,
 				status->plane_states,
 				status->plane_count,
 				dm_new_crtc_state,
 				to_dm_crtc_state(old_crtc_state),
-				dm_state->context))
+				dc_state))
 			dm_error("%s: Failed to update stream scaling!\n", __func__);
 	}
 
@@ -4736,7 +5108,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
 		if (dm_new_crtc_state->stream)
-			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+			amdgpu_dm_commit_planes(state, dc_state, dev,
+						dm, crtc, &wait_for_vblank);
 	}
 
 
@@ -4776,6 +5149,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	for (i = 0; i < crtc_disable_count; i++)
 		pm_runtime_put_autosuspend(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
+
+	if (dc_state_temp)
+		dc_release_state(dc_state_temp);
 }
 
 
@@ -4919,20 +5295,18 @@ static int do_aquire_global_lock(struct drm_device *dev,
 	return ret < 0 ? ret : 0;
 }
 
-void set_freesync_on_stream(struct amdgpu_display_manager *dm,
-			    struct dm_crtc_state *new_crtc_state,
-			    struct dm_connector_state *new_con_state,
-			    struct dc_stream_state *new_stream)
+static void get_freesync_config_for_crtc(
+	struct dm_crtc_state *new_crtc_state,
+	struct dm_connector_state *new_con_state)
 {
 	struct mod_freesync_config config = {0};
-	struct mod_vrr_params vrr = {0};
-	struct dc_info_packet vrr_infopacket = {0};
 	struct amdgpu_dm_connector *aconnector =
 			to_amdgpu_dm_connector(new_con_state->base.connector);
 
-	if (new_con_state->freesync_capable &&
-	    new_con_state->freesync_enable) {
-		config.state = new_crtc_state->freesync_enabled ?
+	new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+
+	if (new_con_state->freesync_capable) {
+		config.state = new_crtc_state->base.vrr_enabled ?
 				VRR_STATE_ACTIVE_VARIABLE :
 				VRR_STATE_INACTIVE;
 		config.min_refresh_in_uhz =
@@ -4942,19 +5316,18 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
 		config.vsif_supported = true;
 	}
 
-	mod_freesync_build_vrr_params(dm->freesync_module,
-				      new_stream,
-				      &config, &vrr);
+	new_crtc_state->freesync_config = config;
+}
 
-	mod_freesync_build_vrr_infopacket(dm->freesync_module,
-					  new_stream,
-					  &vrr,
-					  packet_type_fs1,
-					  NULL,
-					  &vrr_infopacket);
+static void reset_freesync_config_for_crtc(
+	struct dm_crtc_state *new_crtc_state)
+{
+	new_crtc_state->vrr_supported = false;
 
-	new_crtc_state->adjust = vrr.adjust;
-	new_crtc_state->vrr_infopacket = vrr_infopacket;
+	memset(&new_crtc_state->adjust, 0,
+	       sizeof(new_crtc_state->adjust));
+	memset(&new_crtc_state->vrr_infopacket, 0,
+	       sizeof(new_crtc_state->vrr_infopacket));
 }
 
 static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
@@ -4962,11 +5335,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 				 bool enable,
 				 bool *lock_and_validation_needed)
 {
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dc_stream_state *new_stream;
 	int ret = 0;
 
@@ -5014,7 +5387,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 
 			new_stream = create_stream_for_sink(aconnector,
 							     &new_crtc_state->mode,
-							    dm_new_conn_state);
+							    dm_new_conn_state,
+							    dm_old_crtc_state->stream);
 
 			/*
 			 * we can have no stream on ACTION_SET if a display
@@ -5029,8 +5403,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 				break;
 			}
 
-			set_freesync_on_stream(dm, dm_new_crtc_state,
-					       dm_new_conn_state, new_stream);
+			dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
 
 			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
@@ -5040,9 +5413,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			}
 		}
 
-		if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled)
-			new_crtc_state->mode_changed = true;
-
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 			goto next_crtc;
 
@@ -5064,6 +5434,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			if (!dm_old_crtc_state->stream)
 				goto next_crtc;
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				goto fail;
+
 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
 					crtc->base.id);
 
@@ -5079,6 +5453,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			dc_stream_release(dm_old_crtc_state->stream);
 			dm_new_crtc_state->stream = NULL;
 
+			reset_freesync_config_for_crtc(dm_new_crtc_state);
+
 			*lock_and_validation_needed = true;
 
 		} else {/* Add stream for any updated/enabled CRTC */
@@ -5098,6 +5474,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 
 				WARN_ON(dm_new_crtc_state->stream);
 
+				ret = dm_atomic_get_state(state, &dm_state);
+				if (ret)
+					goto fail;
+
 				dm_new_crtc_state->stream = new_stream;
 
 				dc_stream_retain(new_stream);
@@ -5156,7 +5536,9 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			amdgpu_dm_set_ctm(dm_new_crtc_state);
 		}
 
-
+		/* Update Freesync settings. */
+		get_freesync_config_for_crtc(dm_new_crtc_state,
+					     dm_new_conn_state);
 	}
 
 	return ret;
@@ -5172,12 +5554,13 @@ static int dm_update_planes_state(struct dc *dc,
 				  bool enable,
 				  bool *lock_and_validation_needed)
 {
+
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
 	int i ;
 	/* TODO return page_flip_needed() function */
@@ -5215,6 +5598,10 @@ static int dm_update_planes_state(struct dc *dc,
 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, old_plane_crtc->base.id);
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				return ret;
+
 			if (!dc_remove_plane_from_context(
 					dc,
 					dm_old_crtc_state->stream,
@@ -5269,6 +5656,12 @@ static int dm_update_planes_state(struct dc *dc,
 				return ret;
 			}
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
+				return ret;
+			}
+
 			/*
 			 * Any atomic check errors that occur after this will
 			 * not need a release. The plane state will be attached
@@ -5300,11 +5693,14 @@ static int dm_update_planes_state(struct dc *dc,
 
 	return ret;
 }
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
+
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+				    struct drm_atomic_state *state,
+				    enum surface_update_type *out_type)
 {
-
-
-	int i, j, num_plane;
+	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+	int i, j, num_plane, ret = 0;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5320,6 +5716,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 	struct dc_stream_update stream_update;
 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
 
+	if (!updates || !surface) {
+		DRM_ERROR("Plane or surface update failed to allocate");
+		/* Set type to FULL to avoid crashing in DC*/
+		update_type = UPDATE_TYPE_FULL;
+		goto cleanup;
+	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -5372,35 +5774,73 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 			}
 
 			if (num_plane > 0) {
-				status = dc_stream_get_status(new_dm_crtc_state->stream);
+				ret = dm_atomic_get_state(state, &dm_state);
+				if (ret)
+					goto cleanup;
+
+				old_dm_state = dm_atomic_get_old_state(state);
+				if (!old_dm_state) {
+					ret = -EINVAL;
+					goto cleanup;
+				}
+
+				status = dc_state_get_stream_status(old_dm_state->context,
+								    new_dm_crtc_state->stream);
+
 				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
 										  &stream_update, status);
 
 				if (update_type > UPDATE_TYPE_MED) {
 					update_type = UPDATE_TYPE_FULL;
-					goto ret;
+					goto cleanup;
 				}
 			}
 
 		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
 			update_type = UPDATE_TYPE_FULL;
-			goto ret;
+			goto cleanup;
 		}
 	}
 
-ret:
+cleanup:
 	kfree(updates);
 	kfree(surface);
 
-	return update_type;
+	*out_type = update_type;
+	return ret;
 }
 
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For a full update, which removes/adds/updates streams on one
+ * CRTC while flipping on another CRTC, acquiring the global lock guarantees
+ * that any such full update commit will wait for completion of any
+ * outstanding flips using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit().
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the DC sink, which is tied to the DRM connector state. Cleaning this up
+ * should be possible, but it is non-trivial, so it remains a TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_atomic_state *dm_state = NULL;
 	struct dc *dc = adev->dm.dc;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct drm_connector *connector;
 	struct drm_connector_state *old_con_state, *new_con_state;
 	struct drm_crtc *crtc;
@@ -5421,12 +5861,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-		struct dm_crtc_state *dm_old_crtc_state  = to_dm_crtc_state(old_crtc_state);
-
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed &&
-		    (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+		    !new_crtc_state->vrr_enabled)
 			continue;
 
 		if (!new_crtc_state->enable)
@@ -5441,10 +5878,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;
 	}
 
-	dm_state->context = dc_create_state();
-	ASSERT(dm_state->context);
-	dc_resource_state_copy_construct_current(dc, dm_state->context);
-
 	/* Remove exiting planes if they are modified */
 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
 	if (ret) {
@@ -5497,16 +5930,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		lock_and_validation_needed = true;
 	}
 
-	/*
-	 * For full updates case when
-	 * removing/adding/updating streams on one CRTC while flipping
-	 * on another CRTC,
-	 * acquiring global lock  will guarantee that any such full
-	 * update commit
-	 * will wait for completion of any outstanding flip using DRMs
-	 * synchronization events.
-	 */
-	update_type = dm_determine_update_type_for_commit(dc, state);
+	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+	if (ret)
+		goto fail;
 
 	if (overall_update_type < update_type)
 		overall_update_type = update_type;
@@ -5524,6 +5950,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 
 	if (overall_update_type > UPDATE_TYPE_FAST) {
+		ret = dm_atomic_get_state(state, &dm_state);
+		if (ret)
+			goto fail;
 
 		ret = do_aquire_global_lock(dev, state);
 		if (ret)
@@ -5533,6 +5962,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			ret = -EINVAL;
 			goto fail;
 		}
+	} else if (state->legacy_cursor_update) {
+		/*
+		 * This is a fast cursor update coming from the plane update
+		 * helper, check if it can be done asynchronously for better
+		 * performance.
+		 */
+		state->async_update = !drm_atomic_helper_async_check(dev, state);
 	}
 
 	/* Must be success */
@@ -5578,14 +6014,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	struct detailed_data_monitor_range *range;
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
-	struct dm_connector_state *dm_con_state;
+	struct dm_connector_state *dm_con_state = NULL;
 
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	bool freesync_capable = false;
 
 	if (!connector->state) {
 		DRM_ERROR("%s - Connector has no state", __func__);
-		return;
+		goto update;
 	}
 
 	if (!edid) {
@@ -5595,9 +6032,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		amdgpu_dm_connector->max_vfreq = 0;
 		amdgpu_dm_connector->pixel_clock_mhz = 0;
 
-		dm_con_state->freesync_capable = false;
-		dm_con_state->freesync_enable = false;
-		return;
+		goto update;
 	}
 
 	dm_con_state = to_dm_connector_state(connector->state);
@@ -5605,10 +6040,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	edid_check_required = false;
 	if (!amdgpu_dm_connector->dc_sink) {
 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
-		return;
+		goto update;
 	}
 	if (!adev->dm.freesync_module)
-		return;
+		goto update;
 	/*
 	 * if edid non zero restrict freesync only for dp and edp
 	 */
@@ -5620,7 +6055,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 						amdgpu_dm_connector);
 		}
 	}
-	dm_con_state->freesync_capable = false;
 	if (edid_check_required == true && (edid->version > 1 ||
 	   (edid->version == 1 && edid->revision > 1))) {
 		for (i = 0; i < 4; i++) {
@@ -5652,8 +6086,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		if (amdgpu_dm_connector->max_vfreq -
 		    amdgpu_dm_connector->min_vfreq > 10) {
 
-			dm_con_state->freesync_capable = true;
+			freesync_capable = true;
 		}
 	}
+
+update:
+	if (dm_con_state)
+		dm_con_state->freesync_capable = freesync_capable;
+
+	if (connector->vrr_capable_property)
+		drm_connector_set_vrr_capable_property(connector,
+						       freesync_capable);
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6e069d7..25bb91e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,60 +59,140 @@ struct common_irq_params {
 	enum dc_irq_source irq_src;
 };
 
+/**
+ * struct irq_list_head - Linked list for low-context (deferred) IRQ handlers.
+ *
+ * @head: The list_head within &struct handler_data
+ * @work: A work_struct containing the deferred handler work
+ */
 struct irq_list_head {
 	struct list_head head;
 	/* In case this interrupt needs post-processing, 'work' will be queued*/
 	struct work_struct work;
 };
 
+/**
+ * struct dm_comressor_info - Buffer info used by frame buffer compression
+ * @cpu_addr: MMIO cpu addr
+ * @bo_ptr: Pointer to the buffer object
+ * @gpu_addr: MMIO gpu addr
+ */
 struct dm_comressor_info {
 	void *cpu_addr;
 	struct amdgpu_bo *bo_ptr;
 	uint64_t gpu_addr;
 };
 
+/**
+ * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
+ * @min_input_signal: minimum possible input in range 0-255
+ * @max_input_signal: maximum possible input in range 0-255
+ * @caps_valid: true if these values are from the ACPI interface
+ */
+struct amdgpu_dm_backlight_caps {
+	int min_input_signal;
+	int max_input_signal;
+	bool caps_valid;
+};
+
+/**
+ * struct amdgpu_display_manager - Central amdgpu display manager device
+ *
+ * @dc: Display Core control structure
+ * @adev: AMDGPU base driver structure
+ * @ddev: DRM base driver structure
+ * @display_indexes_num: Max number of display streams supported
+ * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
+ * @backlight_dev: Backlight control device
+ * @cached_state: Caches device atomic state for suspend/resume
+ * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ */
 struct amdgpu_display_manager {
+
 	struct dc *dc;
+
+	/**
+	 * @cgs_device:
+	 *
+	 * The Common Graphics Services device. It provides an interface for
+	 * accessing registers.
+	 */
 	struct cgs_device *cgs_device;
 
-	struct amdgpu_device *adev;	/*AMD base driver*/
-	struct drm_device *ddev;	/*DRM base driver*/
+	struct amdgpu_device *adev;
+	struct drm_device *ddev;
 	u16 display_indexes_num;
 
-	/*
-	 * 'irq_source_handler_table' holds a list of handlers
-	 * per (DAL) IRQ source.
+	/**
+	 * @atomic_obj:
 	 *
-	 * Each IRQ source may need to be handled at different contexts.
-	 * By 'context' we mean, for example:
-	 * - The ISR context, which is the direct interrupt handler.
-	 * - The 'deferred' context - this is the post-processing of the
-	 *	interrupt, but at a lower priority.
+	 * In combination with &dm_atomic_state it helps manage
+	 * global atomic state that doesn't map cleanly into existing
+	 * drm resources, like &dc_context.
+	 */
+	struct drm_private_obj atomic_obj;
+
+	struct drm_modeset_lock atomic_obj_lock;
+
+	/**
+	 * @dc_lock:
+	 *
+	 * Guards access to DC functions that can issue register write
+	 * sequences.
+	 */
+	struct mutex dc_lock;
+
+	/**
+	 * @irq_handler_list_low_tab:
+	 *
+	 * Low priority IRQ handler table.
+	 *
+	 * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
+	 * source. Low priority IRQ handlers are deferred to a workqueue to be
+	 * processed. Hence, they can sleep.
 	 *
 	 * Note that handlers are called in the same order as they were
 	 * registered (FIFO).
 	 */
 	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+
+	/**
+	 * @irq_handler_list_high_tab:
+	 *
+	 * High priority IRQ handler table.
+	 *
+	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
+	 * handlers in this table are not deferred and are called immediately.
+	 */
 	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
 
+	/**
+	 * @pflip_params:
+	 *
+	 * Page flip IRQ parameters, passed to registered handlers when
+	 * triggered.
+	 */
 	struct common_irq_params
 	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
 
+	/**
+	 * @vblank_params:
+	 *
+	 * Vertical blanking IRQ parameters, passed to registered handlers when
+	 * triggered.
+	 */
 	struct common_irq_params
 	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
 
-	/* this spin lock synchronizes access to 'irq_handler_list_table' */
 	spinlock_t irq_handler_list_table_lock;
 
 	struct backlight_device *backlight_dev;
 
 	const struct dc_link *backlight_link;
+	struct amdgpu_dm_backlight_caps backlight_caps;
 
 	struct mod_freesync *freesync_module;
 
-	/**
-	 * Caches device atomic state for suspend/resume
-	 */
 	struct drm_atomic_state *cached_state;
 
 	struct dm_comressor_info compressor;
@@ -183,15 +263,21 @@ struct dm_crtc_state {
 	int crc_skip_count;
 	bool crc_enabled;
 
-	bool freesync_enabled;
+	bool freesync_timing_changed;
+	bool freesync_vrr_info_changed;
+
+	bool vrr_supported;
+	struct mod_freesync_config freesync_config;
 	struct dc_crtc_timing_adjust adjust;
 	struct dc_info_packet vrr_infopacket;
+
+	int abm_level;
 };
 
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
 
 struct dm_atomic_state {
-	struct drm_atomic_state base;
+	struct drm_private_state base;
 
 	struct dc_state *context;
 };
@@ -206,8 +292,8 @@ struct dm_connector_state {
 	uint8_t underscan_hborder;
 	uint8_t max_bpc;
 	bool underscan_enable;
-	bool freesync_enable;
 	bool freesync_capable;
+	uint8_t abm_level;
 };
 
 #define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index be19e68..216e48c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
 	 */
 	stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
 	ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
-						 gamma, true, adev->asic_type <= CHIP_RAVEN);
+						 gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
 	dc_gamma_release(&gamma);
 	if (!ret) {
 		stream->out_transfer_func->type = old_type;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 01fc571..f088ac5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 		return -EINVAL;
 	}
 
+	if (!stream_state) {
+		DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+		return -EINVAL;
+	}
+
 	/* When enabling CRC, we should also disable dithering. */
 	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
 		if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a212178..cd10f77 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -32,16 +32,55 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_irq.h"
 
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This duplication could be cleaned up, and doing so is a
+ * TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback
+ * amdgpu_irq_handler(), which DRM calls on interrupts. This generic
+ * handler looks up the IRQ table and calls the respective
+ * &amdgpu_irq_src_funcs.process hooks.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom-half implementing workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen
+ * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
+ * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up
+ * DM's IRQ tables. However, in order for the base driver to recognize this hook, DM
+ * still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
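
A condensed sketch of the two-table dispatch described above (simplified types; no locking, and the workqueue call is a hypothetical stand-in — the real tables live in amdgpu_dm.h):

#define NUM_SOURCES 4

typedef void (*dm_handler_t)(void *arg);

struct irq_tables {
	dm_handler_t high[NUM_SOURCES];	/* top half: runs in ISR context */
	dm_handler_t low[NUM_SOURCES];	/* bottom half: deferred, may sleep */
};

/* hypothetical stand-in for queueing the low table's work_struct */
static void defer_to_workqueue(dm_handler_t h, void *arg)
{
	h(arg);
}

static void dm_dispatch(struct irq_tables *t, int src, void *arg)
{
	if (t->high[src])
		t->high[src](arg);			/* called immediately */
	if (t->low[src])
		defer_to_workqueue(t->low[src], arg);	/* queued for later */
}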
 /******************************************************************************
  * Private declarations.
  *****************************************************************************/
 
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
+ *
+ * @list: Linked list entry referencing the next/previous handler
+ * @handler: Handler function
+ * @handler_arg: Argument passed to the handler when triggered
+ * @dm: DM which this handler belongs to
+ * @irq_source: DC interrupt source that this handler is registered for
+ */
 struct amdgpu_dm_irq_handler_data {
 	struct list_head list;
 	interrupt_handler handler;
 	void *handler_arg;
 
-	/* DM which this handler belongs to */
 	struct amdgpu_display_manager *dm;
 	/* DAL irq source which registered for this interrupt. */
 	enum dc_irq_source irq_source;
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
 }
 
 /**
- * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
  *
  * @work: work struct
  */
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work)
 	 * (The most common use is HPD interrupt) */
 }
 
-/**
- * Remove a handler and return a pointer to hander list from which the
+/*
+ * Remove a handler and return a pointer to handler list from which the
  * handler was removed.
  */
 static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
  * Note: caller is responsible for input validation.
  *****************************************************************************/
 
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source, and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context is executed within a
+ * workqueue, thereby allowing operations that sleep.
+ *
+ * Registered handlers are called in a FIFO manner, i.e. the earliest
+ * registered handler will be called first.
+ *
+ * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
+ *         source, handler function, and args
+ */
 void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 				       struct dc_interrupt_params *int_params,
 				       void (*ih)(void *),
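
For reference, a hedged usage sketch mirroring how dce110_register_irq_handlers() fills in the parameters before calling this function (field and constant names assumed from that code path; treat as illustrative):

struct dc_interrupt_params int_params = {0};

int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;	/* deferred; handler may sleep */
int_params.irq_source = DC_IRQ_SOURCE_HPD1;

/* handle_hpd_irq has the void (*)(void *) shape this API expects;
 * aconnector comes back as handler_args on every interrupt. */
amdgpu_dm_irq_register_interrupt(adev, &int_params,
				 handle_hpd_irq, aconnector);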
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 	return handler_data;
 }
 
+/**
+ * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ * @irq_source: IRQ source to remove the given handler from
+ * @ih: Function pointer to the interrupt handler to unregister
+ *
+ * Go through both low and high context IRQ tables, and find the given handler
+ * for the given irq source. If found, remove it. Otherwise, do nothing.
+ */
 void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 					enum dc_irq_source irq_source,
 					void *ih)
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 	}
 }
 
+/**
+ * amdgpu_dm_irq_init() - Initialize DM IRQ management
+ * @adev:  The base driver device containing the DM device
+ *
+ * Initialize DM's high and low context IRQ tables.
+ *
+ * The N by M table contains N IRQ sources, each with M
+ * &struct amdgpu_dm_irq_handler_data entries hooked together in a linked
+ * list. The list_heads are initialized here. When IRQ source n is
+ * triggered, all m of its handlers are called in sequence, FIFO according
+ * to registration order.
+ *
+ * The low context table requires special steps to initialize, since handlers
+ * will be deferred to a workqueue. See &struct irq_list_head.
+ */
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
 	int src;
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* DM IRQ and timer resource release */
+/**
+ * amdgpu_dm_irq_fini() - Tear down DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Flush all work within the low context IRQ table.
+ */
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
 	int src;
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
+/*
  * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
  * "irq_source".
  */
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 
 }
 
-/** amdgpu_dm_irq_immediate_work
- *  Callback high irq work immediately, don't send to work queue
+/*
+ * amdgpu_dm_irq_immediate_work
+ * Call high IRQ work immediately instead of sending it to the work queue.
  */
 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 					 enum dc_irq_source irq_source)
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 }
 
-/*
- * amdgpu_dm_irq_handler
+/**
+ * amdgpu_dm_irq_handler - Generic DM IRQ handler
+ * @adev: amdgpu base driver device containing the DM device
+ * @source: Unused
+ * @entry: Data about the triggered interrupt
  *
- * Generic IRQ handler, calls all registered high irq work immediately, and
- * schedules work for low irq
+ * Calls all registered high irq work immediately, and schedules work for low
+ * irq. The DM IRQ table is used to find the corresponding handlers.
  */
 static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
 				 struct amdgpu_irq_src *source,
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
 }
 
-/*
+/**
  * amdgpu_dm_hpd_init - hpd setup callback.
  *
  * @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 12001a0..9d2d698 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 		return;
 
 	clock.clock_type = amd_pp_dcf_clock;
-	clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+	clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
 	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 
 	clock.clock_type = amd_pp_f_clock;
-	clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+	clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
 	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 			wm_dce_clocks[i].wm_set_id =
 					ranges->reader_wm_sets[i].wm_inst;
 		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].max_drain_clk_khz;
+				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].min_drain_clk_khz;
+				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].max_fill_clk_khz;
+				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
 		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].min_fill_clk_khz;
+				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
 	}
 
 	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 			wm_soc_clocks[i].wm_set_id =
 					ranges->writer_wm_sets[i].wm_inst;
 		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].max_fill_clk_khz;
+				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].min_fill_clk_khz;
+				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].max_drain_clk_khz;
+				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
 		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].min_drain_clk_khz;
+				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
 	}
 
 	pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
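
Together with the dcn_calcs.c hunk later in this series, this is a unit round-trip: the calcs code now stores MHz, and this glue converts back to the kHz that powerplay expects. A quick worked example (value from the WM_REPORT_OVERRIDE branch in dcn_calcs.c):

uint32_t khz = 300000;		/* old min_drain_clk_khz value */
uint32_t mhz = khz / 1000;	/* 300, stored in reader_wm_sets[0].min_drain_clk_mhz */
uint32_t out = mhz * 1000;	/* 300000, handed to wm_dce_clocks[i].*_in_khz */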
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 0000000..d898981
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+	TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+	TP_ARGS(read_count, reg, value),
+	TP_STRUCT__entry(
+			__field(uint32_t, reg)
+			__field(uint32_t, value)
+		),
+	TP_fast_assign(
+			__entry->reg = reg;
+			__entry->value = value;
+			*read_count = *read_count + 1;
+		),
+	TP_printk("reg=0x%08lx, value=0x%08lx",
+			(unsigned long)__entry->reg,
+			(unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+	TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+	TP_ARGS(write_count, reg, value),
+	TP_STRUCT__entry(
+			__field(uint32_t, reg)
+			__field(uint32_t, value)
+		),
+	TP_fast_assign(
+			__entry->reg = reg;
+			__entry->value = value;
+			*write_count = *write_count + 1;
+		),
+	TP_printk("reg=0x%08lx, value=0x%08lx",
+			(unsigned long)__entry->reg,
+			(unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+	TP_PROTO(unsigned long read_count, unsigned long write_count,
+		unsigned long *last_read, unsigned long *last_write,
+		const char *func, unsigned int line),
+	TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+	TP_STRUCT__entry(
+			__field(uint32_t, reads)
+			__field(uint32_t, writes)
+			__field(uint32_t, read_delta)
+			__field(uint32_t, write_delta)
+			__string(func, func)
+			__field(uint32_t, line)
+			),
+	TP_fast_assign(
+			__entry->reads = read_count;
+			__entry->writes = write_count;
+			__entry->read_delta = read_count - *last_read;
+			__entry->write_delta = write_count - *last_write;
+			__assign_str(func, func);
+			__entry->line = line;
+			*last_read = read_count;
+			*last_write = write_count;
+			),
+	TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+			__get_str(func), __entry->line,
+			(unsigned long)__entry->read_delta,
+			(unsigned long)__entry->reads,
+			(unsigned long)__entry->write_delta,
+			(unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
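
These TRACE_EVENT() definitions generate trace_amdgpu_dc_rreg() and friends; a hedged sketch of a register-read wrapper using them (the accessor and counter layout are assumptions, not code from this patch):

uint32_t value = read_register(ctx, reg);	/* hypothetical MMIO read */

/* bumps ctx->perf_trace->read_count and logs reg/value when the
 * tracepoint is enabled */
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, reg, value);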
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 0e1dc1b..c2ab026 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
 static struct device_id device_type_from_device_id(uint16_t device_id)
 {
 
-	struct device_id result_device_id;
+	struct device_id result_device_id = {0};
 
 	switch (device_id) {
 	case ATOM_DEVICE_LCD1_SUPPORT:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ff764da..751bb61 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
 	.is_accelerated_mode = bios_parser_is_accelerated_mode,
 
+	.is_active_display = bios_is_active_display,
+
 	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index d458947..fdda8aa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays(
 	return active_disp;
 }
 
+bool bios_is_active_display(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag)
+{
+	uint32_t active = 0;
+	uint32_t connected = 0;
+	uint32_t bios_scratch_0 = 0;
+	uint32_t bios_scratch_3 = 0;
+
+	switch (signal)	{
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		{
+			if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
+				switch (device_tag->dev_id.enum_id)	{
+				case 1:
+					{
+						active    = ATOM_S3_DFP1_ACTIVE;
+						connected = 0x0008;	//ATOM_DISPLAY_DFP1_CONNECT
+					}
+					break;
+
+				case 2:
+					{
+						active    = ATOM_S3_DFP2_ACTIVE;
+						connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT
+					}
+					break;
+
+				case 3:
+					{
+						active    = ATOM_S3_DFP3_ACTIVE;
+						connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT
+					}
+					break;
+
+				case 4:
+					{
+						active    = ATOM_S3_DFP4_ACTIVE;
+						connected = 0x0400;	//ATOM_DISPLAY_DFP4_CONNECT
+					}
+					break;
+
+				case 5:
+					{
+						active    = ATOM_S3_DFP5_ACTIVE;
+						connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT
+					}
+					break;
+
+				case 6:
+					{
+						active    = ATOM_S3_DFP6_ACTIVE;
+						connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT
+					}
+					break;
+
+				default:
+					break;
+				}
+			}
+		}
+		break;
+
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_EDP:
+		{
+			active    = ATOM_S3_LCD1_ACTIVE;
+			connected = 0x0002;	//ATOM_DISPLAY_LCD1_CONNECT
+		}
+		break;
+
+	default:
+		break;
+	}
+
+
+	if (bios->regs->BIOS_SCRATCH_0) /*follow up with other asic, todo*/
+		bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
+	if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/
+		bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
+
+	bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
+	if ((active & bios_scratch_3) && (connected & bios_scratch_0))
+		return true;
+
+	return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index 75a29e6..f33cac2 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
 void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
 void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
 uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
+bool bios_is_active_display(
+	struct dc_bios *bios,
+	enum signal_type signal,
+	const struct connector_device_tag_info *device_tag);
 
 #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
 
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3208188..43e4a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 	ranges.num_reader_wm_sets = WM_SET_COUNT;
 	ranges.num_writer_wm_sets = WM_SET_COUNT;
 	ranges.reader_wm_sets[0].wm_inst = WM_A;
-	ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
-	ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
-	ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
-	ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+	ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
+	ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
+	ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
+	ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
 	ranges.writer_wm_sets[0].wm_inst = WM_A;
-	ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
-	ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
-	ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
-	ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+	ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
+	ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
+	ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
+	ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
 		ranges.reader_wm_sets[0].wm_inst = WM_A;
-		ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
-		ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
-		ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
-		ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+		ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
+		ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
+		ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
+		ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
 		ranges.writer_wm_sets[0].wm_inst = WM_A;
-		ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
-		ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
-		ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
-		ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+		ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
+		ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
+		ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
+		ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
 	}
 
 	ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7c491c9..d9c5798 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -62,6 +62,55 @@
 
 const static char DC_BUILD_ID[] = "production-build";
 
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct.  One per driver.  Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints).  Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display.  Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
+ * (the display directly attached).  It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case)
+ *
+ * struct resource_pool - One per driver.  Represents the hw blocks not in the
+ * main pipeline.  Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed.  There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state, tracking information
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display.  Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool.  Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context.  Represents the
+ * internal hardware pipeline components.  Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
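
A short sketch of how these structs nest in practice (stream_count and streams[] appear elsewhere in this series; the accessor is hypothetical):

struct dc *dc = get_dc();		/* hypothetical accessor */
struct dc_state *state = dc->current_state;
int i;

for (i = 0; i < state->stream_count; i++) {
	struct dc_stream_state *stream = state->streams[i];
	/* one stream per dc_sink; each stream owns one or more
	 * dc_plane_state framebuffers (more in the MPO case) */
}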
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
@@ -175,6 +224,17 @@ static bool create_links(
 	return false;
 }
 
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+	kfree(*perf_trace);
+	*perf_trace = NULL;
+}
+
 /**
  *****************************************************************************
  *  Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +300,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
 }
 
 /**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
  * @dc: DC Object
  * @stream: The stream to configure CRC on.
  * @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +352,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
 }
 
 /**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
  * @dc: DC object
  * @stream: The DC stream state of the stream to get CRCs from.
  * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -328,7 +388,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
 		enum dc_dither_option option)
 {
 	struct bit_depth_reduction_params params;
-	struct dc_link *link = stream->status.link;
+	struct dc_link *link = stream->sink->link;
 	struct pipe_ctx *pipes = NULL;
 	int i;
 
@@ -391,9 +451,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
 				== stream) {
 
 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
-			dc->hwss.program_csc_matrix(pipes,
-			stream->output_color_space,
-			stream->csc_color_matrix.matrix);
+			dc->hwss.program_output_csc(dc,
+					pipes,
+					stream->output_color_space,
+					stream->csc_color_matrix.matrix,
+					pipes->plane_res.hubp->opp_id);
 			ret = true;
 		}
 	}
@@ -534,6 +596,8 @@ static void destruct(struct dc *dc)
 	if (dc->ctx->created_bios)
 		dal_bios_parser_destroy(&dc->ctx->dc_bios);
 
+	dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
 	kfree(dc->ctx);
 	dc->ctx = NULL;
 
@@ -657,6 +721,12 @@ static bool construct(struct dc *dc,
 		goto fail;
 	}
 
+	dc_ctx->perf_trace = dc_perf_trace_create();
+	if (!dc_ctx->perf_trace) {
+		ASSERT_CRITICAL(false);
+		goto fail;
+	}
+
 	/* Create GPIO service */
 	dc_ctx->gpio_service = dal_gpio_service_create(
 			dc_version,
@@ -941,7 +1011,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	if (!dcb->funcs->is_accelerated_mode(dcb))
 		dc->hwss.enable_accelerated_mode(dc, context);
 
-	dc->hwss.set_bandwidth(dc, context, false);
+	dc->hwss.prepare_bandwidth(dc, context);
 
 	/* re-program planes for existing stream, in case we need to
 	 * free up plane resource for later use
@@ -957,8 +1027,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	}
 
 	/* Program hardware */
-	dc->hwss.ready_shared_resources(dc, context);
-
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		pipe = &context->res_ctx.pipe_ctx[i];
 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
@@ -1012,7 +1080,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
 	/* pplib is notified if disp_num changed */
-	dc->hwss.set_bandwidth(dc, context, true);
+	dc->hwss.optimize_bandwidth(dc, context);
 
 	dc_release_state(dc->current_state);
 
@@ -1020,8 +1088,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	dc_retain_state(dc->current_state);
 
-	dc->hwss.optimize_shared_resources(dc);
-
 	return result;
 }
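
The hunks in this file replace the old two-phase set_bandwidth(dc, context, false/true) with explicitly named brackets around hardware programming. In outline (the middle call is a hypothetical stand-in for the programming body):

dc->hwss.prepare_bandwidth(dc, context);	/* raise clocks before programming */
program_streams_and_planes(dc, context);	/* hypothetical stand-in */
dc->hwss.optimize_bandwidth(dc, context);	/* trim clocks once state is committed */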
 
@@ -1063,7 +1129,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 	dc->optimized_required = false;
 
-	dc->hwss.set_bandwidth(dc, context, true);
+	dc->hwss.optimize_bandwidth(dc, context);
 	return true;
 }
 
@@ -1331,6 +1397,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
 	return overall_type;
 }
 
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for an explanation of the update types.
+ */
 enum surface_update_type dc_check_update_surfaces_for_stream(
 		struct dc *dc,
 		struct dc_surface_update *updates,
@@ -1369,35 +1440,6 @@ static struct dc_stream_status *stream_get_status(
 
 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
 
-static void notify_display_count_to_smu(
-		struct dc *dc,
-		struct dc_state *context)
-{
-	int i, display_count;
-	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
-	/*
-	 * if function pointer not set up, this message is
-	 * sent as part of pplib_apply_display_requirements.
-	 * So just return.
-	 */
-	if (!pp_smu || !pp_smu->set_display_count)
-		return;
-
-	display_count = 0;
-	for (i = 0; i < context->stream_count; i++) {
-		const struct dc_stream_state *stream = context->streams[i];
-
-		/* only notify active stream */
-		if (stream->dpms_off)
-			continue;
-
-		display_count++;
-	}
-
-	pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-}
-
 static void commit_planes_do_stream_update(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_stream_update *stream_update,
@@ -1422,7 +1464,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
 					stream_update->adjust->v_total_max);
 
 			if (stream_update->periodic_fn_vsync_delta &&
-					pipe_ctx->stream_res.tg &&
 					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
 				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
@@ -1441,6 +1482,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
 			if (stream_update->output_csc_transform)
 				dc_stream_program_csc_matrix(dc, stream);
 
+			if (stream_update->dither_option) {
+				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+									&pipe_ctx->stream->bit_depth_params);
+				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+						&stream->bit_depth_params,
+						&stream->clamping);
+			}
+
 			/* Full fe update*/
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
@@ -1448,19 +1497,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
 			if (stream_update->dpms_off) {
 				if (*stream_update->dpms_off) {
 					core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
-					dc->hwss.pplib_apply_display_requirements(
-						dc, dc->current_state);
-					notify_display_count_to_smu(dc, dc->current_state);
+					dc->hwss.optimize_bandwidth(dc, dc->current_state);
 				} else {
-					dc->hwss.pplib_apply_display_requirements(
-						dc, dc->current_state);
-					notify_display_count_to_smu(dc, dc->current_state);
+					dc->hwss.prepare_bandwidth(dc, dc->current_state);
 					core_link_enable_stream(dc->current_state, pipe_ctx);
 				}
 			}
 
-
-
 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
 				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
 					// if otg funcs defined check if blanked before programming
@@ -1487,7 +1530,7 @@ static void commit_planes_for_stream(struct dc *dc,
 	struct pipe_ctx *top_pipe_to_program = NULL;
 
 	if (update_type == UPDATE_TYPE_FULL) {
-		dc->hwss.set_bandwidth(dc, context, false);
+		dc->hwss.prepare_bandwidth(dc, context);
 		context_clock_trace(dc, context);
 	}
 
@@ -1669,6 +1712,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
@@ -1724,6 +1770,15 @@ void dc_resume(struct dc *dc)
 		core_link_resume(dc->links[i]);
 }
 
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+
+	if (dmcu)
+		return dmcu->funcs->is_dmcu_initialized(dmcu);
+	return false;
+}
+
 bool dc_submit_i2c(
 		struct dc *dc,
 		uint32_t link_index,
@@ -1753,6 +1808,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
 	return true;
 }
 
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes
+ */
 struct dc_sink *dc_link_add_remote_sink(
 		struct dc_link *link,
 		const uint8_t *edid,
@@ -1811,6 +1871,12 @@ struct dc_sink *dc_link_add_remote_sink(
 	return NULL;
 }
 
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link.
+ */
 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
 {
 	int i;
@@ -1848,4 +1914,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
 	info->dcfClockDeepSleep			= (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
 	info->fClock					= (unsigned int)state->bw.dcn.clk.fclk_khz;
 	info->phyClock					= (unsigned int)state->bw.dcn.clk.phyclk_khz;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index e1ebdf7..73d0495 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -311,7 +311,7 @@ void context_timing_trace(
 {
 	int i;
 	struct dc  *core_dc = dc;
-	int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+	int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
 	struct crtc_position position;
 	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
 	DC_LOGGER_INIT(dc->ctx->logger);
@@ -322,8 +322,7 @@ void context_timing_trace(
 		/* get_position() returns CRTC vertical/horizontal counter
 		 * hence not applicable for underlay pipe
 		 */
-		if (pipe_ctx->stream == NULL
-				 || pipe_ctx->pipe_idx == underlay_idx)
+		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
 			continue;
 
 		pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
@@ -333,7 +332,7 @@ void context_timing_trace(
 	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-		if (pipe_ctx->stream == NULL)
+		if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
 			continue;
 
 		TIMING_TRACE("OTG_%d   H_tot:%d  V_tot:%d   H_pos:%d  V_pos:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 5da2186..4dc5846 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
 	return result;
 }
 
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ *
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles.
+ */
 bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
 	uint32_t is_hpd_high = 0;
@@ -324,9 +331,9 @@ static enum signal_type get_basic_signal_type(
 	return SIGNAL_TYPE_NONE;
 }
 
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
  */
 bool dc_link_is_dp_sink_present(struct dc_link *link)
 {
@@ -593,6 +600,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
 	return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
 }
 
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
@@ -1357,28 +1372,13 @@ static enum dc_status enable_link_dp(
 	struct dc_link *link = stream->sink->link;
 	struct dc_link_settings link_settings = {0};
 	enum dp_panel_mode panel_mode;
-	enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
 
 	/* get link settings for video mode timing */
 	decide_link_settings(stream, &link_settings);
 
-	/* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
-	 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
-	 */
-	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-		max_link_rate = LINK_RATE_HIGH3;
-
-	if (link_settings.link_rate == max_link_rate) {
-		struct dc_clocks clocks = state->bw.dcn.clk;
-
-		/* dce/dcn compat, do not update dispclk */
-		clocks.dispclk_khz = 0;
-		/* 27mhz = 27000000hz= 27000khz */
-		clocks.phyclk_khz = link_settings.link_rate * 27000;
-
-		state->dis_clk->funcs->update_clocks(
-				state->dis_clk, &clocks, false);
-	}
+	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+	state->dccg->funcs->update_clocks(state->dccg, state, false);
 
 	dp_enable_link_phy(
 		link,
@@ -1411,8 +1411,6 @@ static enum dc_status enable_link_dp(
 	else
 		status = DC_FAIL_DP_LINK_TRAINING;
 
-	enable_stream_features(pipe_ctx);
-
 	return status;
 }
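
Worked example for the requested_sym_clk computation above, assuming the usual DC link-rate encoding in units of 0.27 Gbps (so LINK_RATE_HIGH2/HBR2 is 0x14 = 20) and LINK_RATE_REF_FREQ_IN_KHZ = 27000, matching the old "27mhz" comment this hunk removes:

/* HBR2: link_settings.link_rate = 0x14 = 20 (units of 0.27 Gbps) */
requested_sym_clk = 20 * 27000;	/* = 540000 kHz = 540 MHz symbol clock */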
 
@@ -2156,14 +2154,16 @@ int dc_link_get_backlight_level(const struct dc_link *link)
 {
 	struct abm *abm = link->ctx->dc->res_pool->abm;
 
-	if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+	if (abm == NULL || abm->funcs->get_current_backlight == NULL)
 		return DC_ERROR_UNEXPECTED;
 
-	return (int) abm->funcs->get_current_backlight_8_bit(abm);
+	return (int) abm->funcs->get_current_backlight(abm);
 }
 
-bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
-		uint32_t frame_ramp, const struct dc_stream_state *stream)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+		uint32_t backlight_pwm_u16_16,
+		uint32_t frame_ramp,
+		const struct dc_stream_state *stream)
 {
 	struct dc  *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
@@ -2175,26 +2175,24 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 
 	if ((dmcu == NULL) ||
 		(abm == NULL) ||
-		(abm->funcs->set_backlight_level == NULL))
+		(abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
 
-	if (stream) {
-		if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
-			frame_ramp = 0;
-
-		((struct dc_stream_state *)stream)->bl_pwm_level = level;
-	}
+	if (stream)
+		((struct dc_stream_state *)stream)->bl_pwm_level =
+				backlight_pwm_u16_16;
 
 	use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
+	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+			backlight_pwm_u16_16, backlight_pwm_u16_16);
 
 	if (dc_is_embedded_signal(link->connector_signal)) {
-		if (stream != NULL) {
-			for (i = 0; i < MAX_PIPES; i++) {
+		for (i = 0; i < MAX_PIPES; i++) {
+			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
 				if (core_dc->current_state->res_ctx.
-						pipe_ctx[i].stream
-						== stream)
+						pipe_ctx[i].stream->sink->link
+						== link)
 					/* DMCU -1 for all controller id values,
 					 * therefore +1 here
 					 */
@@ -2204,9 +2202,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 						1;
 			}
 		}
-		abm->funcs->set_backlight_level(
+		abm->funcs->set_backlight_level_pwm(
 				abm,
-				level,
+				backlight_pwm_u16_16,
 				frame_ramp,
 				controller_id,
 				use_smooth_brightness);
@@ -2220,7 +2218,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
 	struct dc  *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
 
-	if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+	if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
 
 	abm->funcs->set_abm_immediate_disable(abm);
@@ -2233,7 +2231,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
 	struct dc  *core_dc = link->ctx->dc;
 	struct dmcu *dmcu = core_dc->res_pool->dmcu;
 
-	if (dmcu != NULL && link->psr_enabled)
+	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
 		dmcu->funcs->set_psr_enable(dmcu, enable, wait);
 
 	return true;
@@ -2609,6 +2607,13 @@ void core_link_enable_stream(
 		core_dc->hwss.unblank_stream(pipe_ctx,
 			&pipe_ctx->stream->sink->link->cur_link_settings);
 
+		if (dc_is_dp_signal(pipe_ctx->stream->signal))
+			enable_stream_features(pipe_ctx);
+
+		dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
+				pipe_ctx->stream->bl_pwm_level,
+				0,
+				pipe_ctx->stream);
 	}
 
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d91df5e..849a3a3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2196,7 +2196,7 @@ static void get_active_converter_info(
 	}
 
 	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
-		uint8_t det_caps[4];
+		uint8_t det_caps[16]; /* CTS 4.2.2.7 expects the source to read Detailed Capabilities Info: 00080h-0008Fh. */
 		union dwnstream_port_caps_byte0 *port_caps =
 			(union dwnstream_port_caps_byte0 *)det_caps;
 		core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -2371,11 +2371,22 @@ static bool retrieve_link_cap(struct dc_link *link)
 			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
 
 		if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
-			core_link_read_dpcd(
+			uint8_t ext_cap_data[16];
+
+			memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+			for (i = 0; i < read_dpcd_retry_cnt; i++) {
+				status = core_link_read_dpcd(
 				link,
 				DP_DP13_DPCD_REV,
-				dpcd_data,
-				sizeof(dpcd_data));
+				ext_cap_data,
+				sizeof(ext_cap_data));
+				if (status == DC_OK) {
+					memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+					break;
+				}
+			}
+			if (status != DC_OK)
+				dm_error("%s: Failed to read extended caps; falling back to base DPCD caps.\n", __func__);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b6fe29b..c347afd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -478,10 +478,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
 	return dal_pixel_format;
 }
 
-static void rect_swap_helper(struct rect *rect)
+static inline void get_vp_scan_direction(
+	enum dc_rotation_angle rotation,
+	bool horizontal_mirror,
+	bool *orthogonal_rotation,
+	bool *flip_vert_scan_dir,
+	bool *flip_horz_scan_dir)
 {
-	swap(rect->height, rect->width);
-	swap(rect->x, rect->y);
+	*orthogonal_rotation = false;
+	*flip_vert_scan_dir = false;
+	*flip_horz_scan_dir = false;
+	if (rotation == ROTATION_ANGLE_180) {
+		*flip_vert_scan_dir = true;
+		*flip_horz_scan_dir = true;
+	} else if (rotation == ROTATION_ANGLE_90) {
+		*orthogonal_rotation = true;
+		*flip_horz_scan_dir = true;
+	} else if (rotation == ROTATION_ANGLE_270) {
+		*orthogonal_rotation = true;
+		*flip_vert_scan_dir = true;
+	}
+
+	if (horizontal_mirror)
+		*flip_horz_scan_dir = !*flip_horz_scan_dir;
 }
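
A quick check of the helper above with 90-degree rotation plus horizontal mirror:

bool ortho, flip_v, flip_h;

get_vp_scan_direction(ROTATION_ANGLE_90, true, &ortho, &flip_v, &flip_h);
/* rotation 90 sets ortho = true and flip_h = true; the mirror then
 * toggles flip_h back to false => ortho=true, flip_v=false, flip_h=false */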
 
 static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -490,25 +509,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	const struct dc_stream_state *stream = pipe_ctx->stream;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect surf_src = plane_state->src_rect;
-	struct rect clip = { 0 };
+	struct rect clip, dest;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
 	bool pri_split = pipe_ctx->bottom_pipe &&
 			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
 	bool sec_split = pipe_ctx->top_pipe &&
 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
-	bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
-	/*
-	 * Need to calculate the scan direction for viewport to properly determine offset
-	 */
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
-		flip_vert_scan_dir = true;
-		flip_horz_scan_dir = true;
-	} else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
-		flip_vert_scan_dir = true;
-	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		flip_horz_scan_dir = true;
+	bool orthogonal_rotation, flip_y_start, flip_x_start;
 
 	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
@@ -516,13 +524,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 		sec_split = false;
 	}
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
-
 	/* The actual clip is an intersection between stream
 	 * source and surface clip
 	 */
+	dest = plane_state->dst_rect;
 	clip.x = stream->src.x > plane_state->clip_rect.x ?
 			stream->src.x : plane_state->clip_rect.x;
 
@@ -539,84 +544,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 			stream->src.y + stream->src.height - clip.y :
 			plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
 
+	/*
+	 * Need to calculate how scan origin is shifted in vp space
+	 * to correctly rotate clip and dst
+	 */
+	get_vp_scan_direction(
+			plane_state->rotation,
+			plane_state->horizontal_mirror,
+			&orthogonal_rotation,
+			&flip_y_start,
+			&flip_x_start);
+
+	if (orthogonal_rotation) {
+		swap(clip.x, clip.y);
+		swap(clip.width, clip.height);
+		swap(dest.x, dest.y);
+		swap(dest.width, dest.height);
+	}
+	if (flip_x_start) {
+		clip.x = dest.x + dest.width - clip.x - clip.width;
+		dest.x = 0;
+	}
+	if (flip_y_start) {
+		clip.y = dest.y + dest.height - clip.y - clip.height;
+		dest.y = 0;
+	}
+
 	/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
 	 * num_pixels = clip.num_pix * scl_ratio
 	 */
-	data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
-			surf_src.width / plane_state->dst_rect.width;
-	data->viewport.width = clip.width *
-			surf_src.width / plane_state->dst_rect.width;
+	data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+	data->viewport.width = clip.width * surf_src.width / dest.width;
 
-	data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
-			surf_src.height / plane_state->dst_rect.height;
-	data->viewport.height = clip.height *
-			surf_src.height / plane_state->dst_rect.height;
+	data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+	data->viewport.height = clip.height * surf_src.height / dest.height;
 
-	/* To transfer the x, y to correct coordinate on mirror image (camera).
-	 * deg  0 : transfer x,
-	 * deg 90 : don't need to transfer,
-	 * deg180 : transfer y,
-	 * deg270 : transfer x and y.
-	 * To transfer the x, y to correct coordinate on non-mirror image (video).
-	 * deg  0 : don't need to transfer,
-	 * deg 90 : transfer y,
-	 * deg180 : transfer x and y,
-	 * deg270 : transfer x.
-	 */
-	if (pipe_ctx->plane_state->horizontal_mirror) {
-		if (flip_horz_scan_dir && !flip_vert_scan_dir) {
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-			data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-		} else if (flip_horz_scan_dir && flip_vert_scan_dir)
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
-		else {
-			if (!flip_horz_scan_dir && !flip_vert_scan_dir)
-				data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+	/* Handle split */
+	if (pri_split || sec_split) {
+		if (orthogonal_rotation) {
+			if (flip_y_start != pri_split)
+				data->viewport.height /= 2;
+			else {
+				data->viewport.y +=  data->viewport.height / 2;
+				/* Ceil offset pipe */
+				data->viewport.height = (data->viewport.height + 1) / 2;
+			}
+		} else {
+			if (flip_x_start != pri_split)
+				data->viewport.width /= 2;
+			else {
+				data->viewport.x +=  data->viewport.width / 2;
+				/* Ceil offset pipe */
+				data->viewport.width = (data->viewport.width + 1) / 2;
+			}
 		}
-	} else {
-		if (flip_horz_scan_dir)
-			data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
-		if (flip_vert_scan_dir)
-			data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
 	}
 
 	/* Round down, compensate in init */
 	data->viewport_c.x = data->viewport.x / vpc_div;
 	data->viewport_c.y = data->viewport.y / vpc_div;
-	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
-			dc_fixpt_half : dc_fixpt_zero;
-	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
-			dc_fixpt_half : dc_fixpt_zero;
+	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+
 	/* Round up, assume original video size always even dimensions */
 	data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
 	data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
-	/* Handle hsplit */
-	if (sec_split) {
-		data->viewport.x +=  data->viewport.width / 2;
-		data->viewport_c.x +=  data->viewport_c.width / 2;
-		/* Ceil offset pipe */
-		data->viewport.width = (data->viewport.width + 1) / 2;
-		data->viewport_c.width = (data->viewport_c.width + 1) / 2;
-	} else if (pri_split) {
-		if (data->viewport.width > 1)
-			data->viewport.width /= 2;
-		if (data->viewport_c.width > 1)
-			data->viewport_c.width /= 2;
-	}
-
-	if (plane_state->rotation == ROTATION_ANGLE_90 ||
-			plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
-	}
 }
 
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	const struct dc_stream_state *stream = pipe_ctx->stream;
-	struct rect surf_src = plane_state->src_rect;
 	struct rect surf_clip = plane_state->clip_rect;
 	bool pri_split = pipe_ctx->bottom_pipe &&
 			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
@@ -624,10 +622,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
 	bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
-
 	pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
 	if (stream->src.x < surf_clip.x)
 		pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
@@ -656,7 +650,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 			stream->dst.y + stream->dst.height
 						- pipe_ctx->plane_res.scl_data.recout.y;
 
-	/* Handle h & vsplit */
+	/* Handle h & v split, handle rotation using viewport */
 	if (sec_split && top_bottom_split) {
 		pipe_ctx->plane_res.scl_data.recout.y +=
 				pipe_ctx->plane_res.scl_data.recout.height / 2;
@@ -665,44 +659,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
 				(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
 	} else if (pri_split && top_bottom_split)
 		pipe_ctx->plane_res.scl_data.recout.height /= 2;
-	else if (pri_split || sec_split) {
-		/* HMirror XOR Secondary_pipe XOR Rotation_180 */
-		bool right_view = (sec_split != plane_state->horizontal_mirror) !=
-					(plane_state->rotation == ROTATION_ANGLE_180);
-
-		if (plane_state->rotation == ROTATION_ANGLE_90
-				|| plane_state->rotation == ROTATION_ANGLE_270)
-			/* Secondary_pipe XOR Rotation_270 */
-			right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
-		if (right_view) {
-			pipe_ctx->plane_res.scl_data.recout.x +=
-					pipe_ctx->plane_res.scl_data.recout.width / 2;
-			/* Ceil offset pipe */
-			pipe_ctx->plane_res.scl_data.recout.width =
-					(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
-		} else {
-			if (pipe_ctx->plane_res.scl_data.recout.width > 1)
-				pipe_ctx->plane_res.scl_data.recout.width /= 2;
-		}
-	}
-	/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
-	 *			* 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
-	 *			ratio)
-	 */
-	recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
-					* stream->dst.width / stream->src.width -
-			surf_src.x * plane_state->dst_rect.width / surf_src.width
-					* stream->dst.width / stream->src.width;
-	recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
-					* stream->dst.height / stream->src.height -
-			surf_src.y * plane_state->dst_rect.height / surf_src.height
-					* stream->dst.height / stream->src.height;
-
-	recout_full->width = plane_state->dst_rect.width
-					* stream->dst.width / stream->src.width;
-	recout_full->height = plane_state->dst_rect.height
-					* stream->dst.height / stream->src.height;
+	else if (sec_split) {
+		pipe_ctx->plane_res.scl_data.recout.x +=
+				pipe_ctx->plane_res.scl_data.recout.width / 2;
+		/* Ceil offset pipe */
+		pipe_ctx->plane_res.scl_data.recout.width =
+				(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+	} else if (pri_split)
+		pipe_ctx->plane_res.scl_data.recout.width /= 2;
 }
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -715,9 +679,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 	const int out_w = stream->dst.width;
 	const int out_h = stream->dst.height;
 
+	/* Swap surf_src height and width since scaling ratios are in recout rotation */
 	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
 			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		rect_swap_helper(&surf_src);
+		swap(surf_src.height, surf_src.width);
 
 	pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
 					surf_src.width,
@@ -754,42 +719,133 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static inline void adjust_vp_and_init_for_seamless_clip(
+		bool flip_scan_dir,
+		int recout_skip,
+		int src_size,
+		int taps,
+		struct fixed31_32 ratio,
+		struct fixed31_32 *init,
+		int *vp_offset,
+		int *vp_size)
 {
+	if (!flip_scan_dir) {
+		/* Adjust for viewport end clip-off */
+		if ((*vp_offset + *vp_size) < src_size) {
+			int vp_clip = src_size - *vp_size - *vp_offset;
+			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+
+			int_part = int_part > 0 ? int_part : 0;
+			*vp_size += int_part < vp_clip ? int_part : vp_clip;
+		}
+
+		/* Adjust for non-0 viewport offset */
+		if (*vp_offset) {
+			int int_part;
+
+			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+			int_part = dc_fixpt_floor(*init) - *vp_offset;
+			if (int_part < taps) {
+				int int_adj = *vp_offset >= (taps - int_part) ?
+							(taps - int_part) : *vp_offset;
+				*vp_offset -= int_adj;
+				*vp_size += int_adj;
+				int_part += int_adj;
+			} else if (int_part > taps) {
+				*vp_offset += int_part - taps;
+				*vp_size -= int_part - taps;
+				int_part = taps;
+			}
+			init->value &= 0xffffffff;
+			*init = dc_fixpt_add_int(*init, int_part);
+		}
+	} else {
+		/* Adjust for non-0 viewport offset */
+		if (*vp_offset) {
+			int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
+
+			int_part = int_part > 0 ? int_part : 0;
+			*vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+			*vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
+		}
+
+		/* Adjust for viewport end clip-off */
+		if ((*vp_offset + *vp_size) < src_size) {
+			int int_part;
+			int end_offset = src_size - *vp_offset - *vp_size;
+
+			/*
+			 * this is init if vp had no offset, keep in mind this is from the
+			 * right side of vp due to scan direction
+			 */
+			*init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+			/*
+			 * this is the difference between first pixel of viewport available to read
+			 * and init position, taking into account scan direction
+			 */
+			int_part = dc_fixpt_floor(*init) - end_offset;
+			if (int_part < taps) {
+				int int_adj = end_offset >= (taps - int_part) ?
+							(taps - int_part) : end_offset;
+				*vp_size += int_adj;
+				int_part += int_adj;
+			} else if (int_part > taps) {
+				*vp_size += int_part - taps;
+				int_part = taps;
+			}
+			init->value &= 0xffffffff;
+			*init = dc_fixpt_add_int(*init, int_part);
+		}
+	}
+}
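
A hand-worked pass through the non-flipped offset branch above, with illustrative numbers: suppose that after the recout_skip term is added, floor(*init) = 12, *vp_offset = 10 and taps = 4. Then int_part = 12 - 10 = 2, which is below taps, so int_adj = min(taps - int_part, *vp_offset) = 2: the viewport grows two pixels toward the origin (*vp_offset becomes 8, *vp_size grows by 2) and the integer part of *init is rebuilt as 4, guaranteeing the scaler a full window of taps readable pixels before the first output sample.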
+
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	const struct dc_stream_state *stream = pipe_ctx->stream;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect src = pipe_ctx->plane_state->src_rect;
+	int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
-	bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
+	bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
 
 	/*
 	 * Need to calculate the scan direction for viewport to make adjustments
 	 */
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
-		flip_vert_scan_dir = true;
-		flip_horz_scan_dir = true;
-	} else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
-		flip_vert_scan_dir = true;
-	else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
-		flip_horz_scan_dir = true;
+	get_vp_scan_direction(
+			plane_state->rotation,
+			plane_state->horizontal_mirror,
+			&orthogonal_rotation,
+			&flip_vert_scan_dir,
+			&flip_horz_scan_dir);
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&src);
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
+	/* Calculate src rect rotation adjusted to recout space */
+	surf_size_h = src.x + src.width;
+	surf_size_v = src.y + src.height;
+	if (flip_horz_scan_dir)
+		src.x = 0;
+	if (flip_vert_scan_dir)
+		src.y = 0;
+	if (orthogonal_rotation) {
+		swap(src.x, src.y);
+		swap(src.width, src.height);
+	}
 
-		if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
-			pipe_ctx->plane_state->horizontal_mirror) {
-			flip_vert_scan_dir = true;
-		}
-		if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
-			pipe_ctx->plane_state->horizontal_mirror) {
-			flip_vert_scan_dir = false;
-		}
-	} else if (pipe_ctx->plane_state->horizontal_mirror)
-			flip_horz_scan_dir = !flip_horz_scan_dir;
-
+	/* Recout matching initial vp offset = recout_offset - (stream dst offset +
+	 *			((surf dst offset - stream src offset) * 1/ stream scaling ratio)
+	 *			- (surf surf_src offset * 1/ full scl ratio))
+	 */
+	recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+					* stream->dst.width / stream->src.width -
+					src.x * plane_state->dst_rect.width / src.width
+					* stream->dst.width / stream->src.width);
+	recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+					* stream->dst.height / stream->src.height -
+					src.y * plane_state->dst_rect.height / src.height
+					* stream->dst.height / stream->src.height);
+	if (orthogonal_rotation)
+		swap(recout_skip_h, recout_skip_v);
 	/*
 	 * Init calculated according to formula:
 	 * 	init = (scaling_ratio + number_of_taps + 1) / 2
@@ -808,304 +864,57 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r
 	data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
 			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
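
Taking the formula at face value: 1:1 scaling (ratio = 1.0) with 4 taps gives init = (1.0 + 4 + 1) / 2 = 3.0. For the chroma terms, the half/zero seed set in calculate_viewport() is added on top, and dc_fixpt_truncate(..., 19) clips the result to 19 fractional bits, matching the precision used for the ratios.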
 
-	if (!flip_horz_scan_dir) {
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
-			int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
-		}
-		if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
-			int vp_clip = (src.x + src.width) / vpc_div -
-					data->viewport_c.width - data->viewport_c.x;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
-		}
-
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.x) {
-			int int_part;
-
-			data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-					data->ratios.horz, data->recout.x - recout_full->x));
-			int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
-			if (int_part < data->taps.h_taps) {
-				int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
-							(data->taps.h_taps - int_part) : data->viewport.x;
-				data->viewport.x -= int_adj;
-				data->viewport.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps) {
-				data->viewport.x += int_part - data->taps.h_taps;
-				data->viewport.width -= int_part - data->taps.h_taps;
-				int_part = data->taps.h_taps;
-			}
-			data->inits.h.value &= 0xffffffff;
-			data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-		}
-
-		if (data->viewport_c.x) {
-			int int_part;
-
-			data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-					data->ratios.horz_c, data->recout.x - recout_full->x));
-			int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
-			if (int_part < data->taps.h_taps_c) {
-				int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
-						(data->taps.h_taps_c - int_part) : data->viewport_c.x;
-				data->viewport_c.x -= int_adj;
-				data->viewport_c.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps_c) {
-				data->viewport_c.x += int_part - data->taps.h_taps_c;
-				data->viewport_c.width -= int_part - data->taps.h_taps_c;
-				int_part = data->taps.h_taps_c;
-			}
-			data->inits.h_c.value &= 0xffffffff;
-			data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-		}
-	} else {
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.x) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
-			data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
-		}
-		if (data->viewport_c.x) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
-			data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
-		}
-
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
-			int int_part;
-			int end_offset = src.x + src.width
-					- data->viewport.x - data->viewport.width;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-					data->ratios.horz, data->recout.x - recout_full->x));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, takning into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.h) - end_offset;
-			if (int_part < data->taps.h_taps) {
-				int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
-							(data->taps.h_taps - int_part) : end_offset;
-				data->viewport.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps) {
-				data->viewport.width += int_part - data->taps.h_taps;
-				int_part = data->taps.h_taps;
-			}
-			data->inits.h.value &= 0xffffffff;
-			data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-		}
-
-		if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
-			int int_part;
-			int end_offset = (src.x + src.width) / vpc_div
-					- data->viewport_c.x - data->viewport_c.width;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-					data->ratios.horz_c, data->recout.x - recout_full->x));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, takning into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
-			if (int_part < data->taps.h_taps_c) {
-				int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
-							(data->taps.h_taps_c - int_part) : end_offset;
-				data->viewport_c.width += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.h_taps_c) {
-				data->viewport_c.width += int_part - data->taps.h_taps_c;
-				int_part = data->taps.h_taps_c;
-			}
-			data->inits.h_c.value &= 0xffffffff;
-			data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-		}
-
-	}
-	if (!flip_vert_scan_dir) {
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
-			int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
-		}
-		if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
-			int vp_clip = (src.y + src.height) / vpc_div -
-					data->viewport_c.height - data->viewport_c.y;
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
-		}
-
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.y) {
-			int int_part;
-
-			data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-					data->ratios.vert, data->recout.y - recout_full->y));
-			int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
-			if (int_part < data->taps.v_taps) {
-				int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
-							(data->taps.v_taps - int_part) : data->viewport.y;
-				data->viewport.y -= int_adj;
-				data->viewport.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps) {
-				data->viewport.y += int_part - data->taps.v_taps;
-				data->viewport.height -= int_part - data->taps.v_taps;
-				int_part = data->taps.v_taps;
-			}
-			data->inits.v.value &= 0xffffffff;
-			data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-		}
-
-		if (data->viewport_c.y) {
-			int int_part;
-
-			data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-					data->ratios.vert_c, data->recout.y - recout_full->y));
-			int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
-			if (int_part < data->taps.v_taps_c) {
-				int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
-						(data->taps.v_taps_c - int_part) : data->viewport_c.y;
-				data->viewport_c.y -= int_adj;
-				data->viewport_c.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps_c) {
-				data->viewport_c.y += int_part - data->taps.v_taps_c;
-				data->viewport_c.height -= int_part - data->taps.v_taps_c;
-				int_part = data->taps.v_taps_c;
-			}
-			data->inits.v_c.value &= 0xffffffff;
-			data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
-		}
-	} else {
-		/* Adjust for non-0 viewport offset */
-		if (data->viewport.y) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
-			data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
-		}
-		if (data->viewport_c.y) {
-			int int_part = dc_fixpt_floor(
-					dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-			int_part = int_part > 0 ? int_part : 0;
-			data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
-			data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
-		}
-
-		/* Adjust for viewport end clip-off */
-		if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
-			int int_part;
-			int end_offset = src.y + src.height
-					- data->viewport.y - data->viewport.height;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-					data->ratios.vert, data->recout.y - recout_full->y));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, taking into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.v) - end_offset;
-			if (int_part < data->taps.v_taps) {
-				int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
-							(data->taps.v_taps - int_part) : end_offset;
-				data->viewport.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps) {
-				data->viewport.height += int_part - data->taps.v_taps;
-				int_part = data->taps.v_taps;
-			}
-			data->inits.v.value &= 0xffffffff;
-			data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-		}
-
-		if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
-			int int_part;
-			int end_offset = (src.y + src.height) / vpc_div
-					- data->viewport_c.y - data->viewport_c.height;
-
-			/*
-			 * this is init if vp had no offset, keep in mind this is from the
-			 * right side of vp due to scan direction
-			 */
-			data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-					data->ratios.vert_c, data->recout.y - recout_full->y));
-			/*
-			 * this is the difference between first pixel of viewport available to read
-			 * and init position, taking into account scan direction
-			 */
-			int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
-			if (int_part < data->taps.v_taps_c) {
-				int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
-							(data->taps.v_taps_c - int_part) : end_offset;
-				data->viewport_c.height += int_adj;
-				int_part += int_adj;
-			} else if (int_part > data->taps.v_taps_c) {
-				data->viewport_c.height += int_part - data->taps.v_taps_c;
-				int_part = data->taps.v_taps_c;
-			}
-			data->inits.v_c.value &= 0xffffffff;
-			data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
-		}
-	}
+	/*
+	 * Taps, inits and scaling ratios are in recout space; they need to be
+	 * rotated to the viewport rotation before adjustment
+	 */
+	adjust_vp_and_init_for_seamless_clip(
+			flip_horz_scan_dir,
+			recout_skip_h,
+			surf_size_h,
+			orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+			orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+			orthogonal_rotation ? &data->inits.v : &data->inits.h,
+			&data->viewport.x,
+			&data->viewport.width);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_horz_scan_dir,
+			recout_skip_h,
+			surf_size_h / vpc_div,
+			orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+			orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+			orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+			&data->viewport_c.x,
+			&data->viewport_c.width);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_vert_scan_dir,
+			recout_skip_v,
+			surf_size_v,
+			orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+			orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+			orthogonal_rotation ? &data->inits.h : &data->inits.v,
+			&data->viewport.y,
+			&data->viewport.height);
+	adjust_vp_and_init_for_seamless_clip(
+			flip_vert_scan_dir,
+			recout_skip_v,
+			surf_size_v / vpc_div,
+			orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+			orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+			orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+			&data->viewport_c.y,
+			&data->viewport_c.height);
 
 	/* Interlaced inits based on final vert inits */
 	data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
 	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
 
-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
-		rect_swap_helper(&data->viewport_c);
-		rect_swap_helper(&data->viewport);
-	}
 }
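
The parameter selection in the four calls above follows a single pattern implied by the ternaries: each recout-space axis feeds whichever viewport axis it lands on after rotation.

	recout axis | viewport fields adjusted | params used when orthogonal_rotation
	horizontal  | viewport.x / width       | v_taps, ratios.vert, inits.v
	vertical    | viewport.y / height      | h_taps, ratios.horz, inits.h

Without an orthogonal rotation (0/180 degrees), each axis keeps its own horizontal or vertical taps, ratio and init.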
 
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
-	struct rect recout_full = { 0 };
 	bool res = false;
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 	/* Important: scaling ratio calculation requires pixel format,
@@ -1115,9 +924,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
 			pipe_ctx->plane_state->format);
 
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->stream->dst.height *= 2;
-
 	calculate_scaling_ratios(pipe_ctx);
 
 	calculate_viewport(pipe_ctx);
@@ -1125,7 +931,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
 		return false;
 
-	calculate_recout(pipe_ctx, &recout_full);
+	calculate_recout(pipe_ctx);
 
 	/**
 	 * Setting line buffer pixel depth to 24bpp yields banding
@@ -1138,9 +944,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
 	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->plane_res.scl_data.v_active *= 2;
-
 
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)
@@ -1169,7 +972,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	if (res)
 		/* May need to re-check lb size after this in some obscure scenario */
-		calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+		calculate_inits_and_adj_vp(pipe_ctx);
 
 	DC_LOG_SCALER(
 				"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1185,9 +988,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 				plane_state->dst_rect.x,
 				plane_state->dst_rect.y);
 
-	if (pipe_ctx->stream->timing.flags.INTERLACE)
-		pipe_ctx->stream->dst.height /= 2;
-
 	return res;
 }
 
@@ -1382,6 +1182,9 @@ bool dc_add_plane_to_context(
 		return false;
 	}
 
+	tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+	ASSERT(tail_pipe);
+
 	free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1399,10 +1202,6 @@ bool dc_add_plane_to_context(
 	free_pipe->plane_state = plane_state;
 
 	if (head_pipe != free_pipe) {
-
-		tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
-		ASSERT(tail_pipe);
-
 		free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
 		free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
 		free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1648,6 +1447,14 @@ static bool are_stream_backends_same(
 	return true;
 }
 
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks if there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
 bool dc_is_stream_unchanged(
 	struct dc_stream_state *old_stream, struct dc_stream_state *stream)
 {
@@ -1658,6 +1465,9 @@ bool dc_is_stream_unchanged(
 	return true;
 }
 
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
 bool dc_is_stream_scaling_unchanged(
 	struct dc_stream_state *old_stream, struct dc_stream_state *stream)
 {
@@ -1817,16 +1627,19 @@ bool resource_is_stream_unchanged(
 	return false;
 }
 
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
 enum dc_status dc_add_stream_to_ctx(
 		struct dc *dc,
 		struct dc_state *new_ctx,
 		struct dc_stream_state *stream)
 {
-	struct dc_context *dc_ctx = dc->ctx;
 	enum dc_status res;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
-		DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+		DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
 		return DC_ERROR_UNEXPECTED;
 	}
 
@@ -1836,11 +1649,14 @@ enum dc_status dc_add_stream_to_ctx(
 
 	res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
 	if (res != DC_OK)
-		DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+		DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
 
 	return res;
 }
 
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
 enum dc_status dc_remove_stream_from_ctx(
 			struct dc *dc,
 			struct dc_state *new_ctx,
@@ -2002,6 +1818,8 @@ enum dc_status resource_map_pool_resources(
 		}
 	*/
 
+	calculate_phy_pix_clks(stream);
+
 	/* acquire new resources */
 	pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
 
@@ -2059,6 +1877,12 @@ enum dc_status resource_map_pool_resources(
 	return DC_ERROR_UNEXPECTED;
 }
 
+/**
+ * dc_resource_state_copy_construct_current() - Create a new dc_state from the existing state
+ * This is a shallow copy; it increments refcounts on existing streams and planes.
+ * @dc: copy out of dc->current_state
+ * @dst_ctx: copy into this
+ */
 void dc_resource_state_copy_construct_current(
 		const struct dc *dc,
 		struct dc_state *dst_ctx)
@@ -2071,9 +1895,17 @@ void dc_resource_state_construct(
 		const struct dc *dc,
 		struct dc_state *dst_ctx)
 {
-	dst_ctx->dis_clk = dc->res_pool->dccg;
+	dst_ctx->dccg = dc->res_pool->clk_mgr;
 }
 
+/**
+ * dc_validate_global_state() - Determine if HW can support a given state
+ * Checks HW resource availability and bandwidth requirement.
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Return: DC_OK if the result can be programmed.  Otherwise, an error code.
+ */
 enum dc_status dc_validate_global_state(
 		struct dc *dc,
 		struct dc_state *new_ctx)
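
A caller-side sketch of the contract documented above (pattern only, not code from this series):

	enum dc_status status = dc_validate_global_state(dc, new_ctx);

	if (status != DC_OK)
		return status; /* new_ctx cannot be programmed as-is */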
@@ -2401,113 +2233,15 @@ static void set_vendor_info_packet(
 		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
-	uint32_t length = 0;
-	bool hdmi_vic_mode = false;
-	uint8_t checksum = 0;
-	uint32_t i = 0;
-	enum dc_timing_3d_format format;
-	// Can be different depending on packet content /*todo*/
-	// unsigned int length = pPathMode->dolbyVision ? 24 : 5;
+	/* Vendor-specific (VSP) info packet, used for FreeSync */
 
-	info_packet->valid = false;
-
-	format = stream->timing.timing_3d_format;
-	if (stream->view_format == VIEW_3D_FORMAT_NONE)
-		format = TIMING_3D_FORMAT_NONE;
-
-	/* Can be different depending on packet content */
-	length = 5;
-
-	if (stream->timing.hdmi_vic != 0
-			&& stream->timing.h_total >= 3840
-			&& stream->timing.v_total >= 2160)
-		hdmi_vic_mode = true;
-
-	/* According to HDMI 1.4a CTS, VSIF should be sent
-	 * for both 3D stereo and HDMI VIC modes.
-	 * For all other modes, there is no VSIF sent.  */
-
-	if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+	/* Return early if the packet prepared for the stream is not valid;
+	 * otherwise copy it out wholesale
+	 */
+	if (!stream->vsp_infopacket.valid)
 		return;
 
-	/* 24bit IEEE Registration identifier (0x000c03). LSB first. */
-	info_packet->sb[1] = 0x03;
-	info_packet->sb[2] = 0x0C;
-	info_packet->sb[3] = 0x00;
-
-	/*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
-	 * The value for HDMI_Video_Format are:
-	 * 0x0 (0b000) - No additional HDMI video format is presented in this
-	 * packet
-	 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
-	 * parameter follows
-	 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
-	 * potentially 3D_Ext_Data follows
-	 * 0x3..0x7 (0b011..0b111) - reserved for future use */
-	if (format != TIMING_3D_FORMAT_NONE)
-		info_packet->sb[4] = (2 << 5);
-	else if (hdmi_vic_mode)
-		info_packet->sb[4] = (1 << 5);
-
-	/* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
-	 * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
-	 * The value for 3D_Structure are:
-	 * 0x0 - Frame Packing
-	 * 0x1 - Field Alternative
-	 * 0x2 - Line Alternative
-	 * 0x3 - Side-by-Side (full)
-	 * 0x4 - L + depth
-	 * 0x5 - L + depth + graphics + graphics-depth
-	 * 0x6 - Top-and-Bottom
-	 * 0x7 - Reserved for future use
-	 * 0x8 - Side-by-Side (Half)
-	 * 0x9..0xE - Reserved for future use
-	 * 0xF - Not used */
-	switch (format) {
-	case TIMING_3D_FORMAT_HW_FRAME_PACKING:
-	case TIMING_3D_FORMAT_SW_FRAME_PACKING:
-		info_packet->sb[5] = (0x0 << 4);
-		break;
-
-	case TIMING_3D_FORMAT_SIDE_BY_SIDE:
-	case TIMING_3D_FORMAT_SBS_SW_PACKED:
-		info_packet->sb[5] = (0x8 << 4);
-		length = 6;
-		break;
-
-	case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
-	case TIMING_3D_FORMAT_TB_SW_PACKED:
-		info_packet->sb[5] = (0x6 << 4);
-		break;
-
-	default:
-		break;
-	}
-
-	/*PB5: If PB4 is set to 0x1 (extended resolution format)
-	 * fill PB5 with the correct HDMI VIC code */
-	if (hdmi_vic_mode)
-		info_packet->sb[5] = stream->timing.hdmi_vic;
-
-	/* Header */
-	info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
-	info_packet->hb1 = 0x01; /* Version */
-
-	/* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
-	info_packet->hb2 = (uint8_t) (length);
-
-	/* Calculate checksum */
-	checksum = 0;
-	checksum += info_packet->hb0;
-	checksum += info_packet->hb1;
-	checksum += info_packet->hb2;
-
-	for (i = 1; i <= length; i++)
-		checksum += info_packet->sb[i];
-
-	info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
-	info_packet->valid = true;
+	*info_packet = stream->vsp_infopacket;
 }
 
 static void set_spd_info_packet(
@@ -2563,10 +2297,6 @@ void dc_resource_state_destruct(struct dc_state *context)
 	}
 }
 
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
 void dc_resource_state_copy_construct(
 		const struct dc_state *src_ctx,
 		struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 2ac848a1..66e5c46 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -100,12 +100,11 @@ static void construct(struct dc_stream_state *stream,
 	/* EDID CAP translation for HDMI 2.0 */
 	stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
 
-	stream->status.link = stream->sink->link;
-
 	update_stream_signal(stream);
 
 	stream->out_transfer_func = dc_create_transfer_func();
 	stream->out_transfer_func->type = TF_TYPE_BYPASS;
+	stream->out_transfer_func->ctx = stream->ctx;
 }
 
 static void destruct(struct dc_stream_state *stream)
@@ -171,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
 }
 
 /**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
 bool dc_stream_set_cursor_attributes(
 	struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 8fb3aef..c60c9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
 
 	plane_state->in_transfer_func = dc_create_transfer_func();
 	plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+	plane_state->in_transfer_func->ctx = ctx;
 }
 
 static void destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b57fa61..4b5bbb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,9 +36,10 @@
 
 #include "inc/hw_sequencer.h"
 #include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.68"
+#define DC_VER "3.2.08"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -47,13 +48,6 @@
 /*******************************************************************************
  * Display Core Interfaces
  ******************************************************************************/
-struct dmcu_version {
-	unsigned int date;
-	unsigned int month;
-	unsigned int year;
-	unsigned int interface_version;
-};
-
 struct dc_versions {
 	const char *dc_ver;
 	struct dmcu_version dmcu_version;
@@ -250,8 +244,6 @@ struct dc_debug_options {
 	bool disable_dmcu;
 	bool disable_psr;
 	bool force_abm_enable;
-	bool disable_hbup_pg;
-	bool disable_dpp_pg;
 	bool disable_stereo_support;
 	bool vsr_support;
 	bool performance_trace;
@@ -305,11 +297,6 @@ struct dc {
 	struct hw_sequencer_funcs hwss;
 	struct dce_hwseq *hwseq;
 
-	/* temp store of dm_pp_display_configuration
-	 * to compare to see if display config changed
-	 */
-	struct dm_pp_display_configuration prev_display_config;
-
 	bool optimized_required;
 
 	/* FBC compressor */
@@ -755,5 +742,6 @@ void dc_set_power_state(
 		struct dc *dc,
 		enum dc_acpi_cm_power_state power_state);
 void dc_resume(struct dc *dc);
+bool dc_is_dmcu_initialized(struct dc *dc);
 
 #endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 8130b95..a8b3ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -86,6 +86,10 @@ struct dc_vbios_funcs {
 
 	bool (*is_accelerated_mode)(
 		struct dc_bios *bios);
+	bool (*is_active_display)(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag);
 	void (*set_scratch_critical_state)(
 		struct dc_bios *bios,
 		bool state);
@@ -141,6 +145,7 @@ struct dc_vbios_funcs {
 };
 
 struct bios_registers {
+	uint32_t BIOS_SCRATCH_0;
 	uint32_t BIOS_SCRATCH_3;
 	uint32_t BIOS_SCRATCH_6;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 7825e4b..9ddfe4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -358,15 +358,16 @@ union dc_tiling_info {
 	} gfx8;
 
 	struct {
+		enum swizzle_mode_values swizzle;
 		unsigned int num_pipes;
-		unsigned int num_banks;
+		unsigned int max_compressed_frags;
 		unsigned int pipe_interleave;
+
+		unsigned int num_banks;
 		unsigned int num_shader_engines;
 		unsigned int num_rb_per_se;
-		unsigned int max_compressed_frags;
 		bool shaderEnable;
 
-		enum swizzle_mode_values swizzle;
 		bool meta_linear;
 		bool rb_aligned;
 		bool pipe_aligned;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 3bfdccc..29f19d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -128,8 +128,10 @@ struct dc_link {
 
 const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
 
-/*
- * Return an enumerated dc_link.  dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * dc_link order is constant and determined at
  * boot time.  They cannot be created or destroyed.
  * Use dc_get_caps() to get number of links.
  */
@@ -138,9 +140,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
 	return dc->links[link_index];
 }
 
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
-		uint32_t frame_ramp, const struct dc_stream_state *stream);
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
+ * and 16 bit fractional, where 1.0 is max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+		uint32_t backlight_pwm_u16_16,
+		uint32_t frame_ramp,
+		const struct dc_stream_state *stream);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
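
A minimal sketch of producing the u16.16 value from a conventional 8-bit brightness, mirroring the 0x10101 expansion the old driver_set_backlight_level() code performed internally (helper name hypothetical):

	static uint32_t backlight_8_bit_to_pwm_u16_16(uint8_t level)
	{
		/* level * 0x10101 ~= level * 256/255 as a 24-bit value */
		uint32_t backlight_24bit = level * 0x10101;

		/* keep the top 16 bits, rounding with the MSB of the dropped
		 * byte; 0xFF maps to 0x10000, i.e. 1.0 = max backlight
		 */
		return (backlight_24bit >> 8) + ((backlight_24bit >> 7) & 1);
	}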
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c5bd1fb..be34d63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,6 +56,7 @@ struct dc_stream_state {
 	struct dc_crtc_timing_adjust adjust;
 	struct dc_info_packet vrr_infopacket;
 	struct dc_info_packet vsc_infopacket;
+	struct dc_info_packet vsp_infopacket;
 
 	struct rect src; /* composition area */
 	struct rect dst; /* stream addressable area */
@@ -104,8 +105,6 @@ struct dc_stream_state {
 	bool dpms_off;
 	bool apply_edp_fast_boot_optimization;
 
-	struct dc_stream_status status;
-
 	struct dc_cursor_attributes cursor_attributes;
 	struct dc_cursor_position cursor_position;
 	uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
@@ -131,11 +130,13 @@ struct dc_stream_update {
 	struct dc_crtc_timing_adjust *adjust;
 	struct dc_info_packet *vrr_infopacket;
 	struct dc_info_packet *vsc_infopacket;
+	struct dc_info_packet *vsp_infopacket;
 
 	bool *dpms_off;
 
 	struct colorspace_transform *gamut_remap;
 	enum dc_color_space *output_color_space;
+	enum dc_dither_option *dither_option;
 
 	struct dc_csc_transform *output_csc_transform;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6e12d64..0b20ae2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -73,10 +73,18 @@ struct hw_asic_id {
 	void *atombios_base_address;
 };
 
+struct dc_perf_trace {
+	unsigned long read_count;
+	unsigned long write_count;
+	unsigned long last_entry_read;
+	unsigned long last_entry_write;
+};
+
 struct dc_context {
 	struct dc *dc;
 
 	void *driver_context; /* e.g. amdgpu_device */
+	struct dc_perf_trace *perf_trace;
 	void *cgs_device;
 
 	enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
 };
 
 struct dc_panel_patch {
-	unsigned int disconnect_delay;
 	unsigned int dppowerup_delay;
 	unsigned int extra_t12_ms;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8f7f0e8..6d7b64a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
 dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 29294db..2a342ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -54,7 +54,7 @@
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
 
-static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
 {
 	uint64_t current_backlight;
 	uint32_t round_result;
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
 	return (uint32_t)(current_backlight);
 }
 
-static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+static void driver_set_backlight_level(struct dce_abm *abm_dce,
+		uint32_t backlight_pwm_u16_16)
 {
-	uint32_t backlight_24bit;
-	uint32_t backlight_17bit;
 	uint32_t backlight_16bit;
 	uint32_t masked_pwm_period;
-	uint8_t rounding_bit;
 	uint8_t bit_count;
 	uint64_t active_duty_cycle;
 	uint32_t pwm_period_bitcnt;
 
 	/*
-	 * 1. Convert 8-bit value to 17 bit U1.16 format
-	 * (1 integer, 16 fractional bits)
-	 */
-
-	/* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
-	 * effectively multiplying value by 256/255
-	 * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
-	 */
-	backlight_24bit = level * 0x10101;
-
-	/* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
-	 * used for rounding, take most significant bit of fraction for
-	 * rounding, e.g. for 0xEFEFEF, rounding bit is 1
-	 */
-	rounding_bit = (backlight_24bit >> 7) & 1;
-
-	/* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
-	 * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
-	 */
-	backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
-
-	/*
-	 * 2. Find  16 bit backlight active duty cycle, where 0 <= backlight
+	 * 1. Find  16 bit backlight active duty cycle, where 0 <= backlight
 	 * active duty cycle <= backlight period
 	 */
 
-	/* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+	/* 1.1 Apply bitmask for backlight period value based on value of BITCNT
 	 */
 	REG_GET_2(BL_PWM_PERIOD_CNTL,
 			BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 	/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
 	masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
 
-	/* 2.2 Calculate integer active duty cycle required upper 16 bits
+	/* 1.2 Calculate integer active duty cycle required upper 16 bits
 	 * contain integer component, lower 16 bits contain fractional component
 	 * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
 	 */
-	active_duty_cycle = backlight_17bit * masked_pwm_period;
+	active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
 
-	/* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+	/* 1.3 Calculate 16 bit active duty cycle from integer and fractional
 	 * components shift by bitCount then mask 16 bits and add rounding bit
 	 * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
 	 */
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 	backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
 
 	/*
-	 * 3. Program register with updated value
+	 * 2. Program register with updated value
 	 */
 
-	/* 3.1 Lock group 2 backlight registers */
+	/* 2.1 Lock group 2 backlight registers */
 
 	REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
 			BL_PWM_GRP1_REG_LOCK, 1);
 
-	// 3.2 Write new active duty cycle
+	// 2.2 Write new active duty cycle
 	REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
 
-	/* 3.3 Unlock group 2 backlight registers */
+	/* 2.3 Unlock group 2 backlight registers */
 	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_REG_LOCK, 0);
 
-	/* 5.4.4 Wait for pending bit to be cleared */
+	/* 3. Wait for pending bit to be cleared */
 	REG_WAIT(BL_PWM_GRP1_REG_LOCK,
 			BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
 			1, 10000);
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
 
 static void dmcu_set_backlight_level(
 	struct dce_abm *abm_dce,
-	uint32_t level,
+	uint32_t backlight_pwm_u16_16,
 	uint32_t frame_ramp,
 	uint32_t controller_id)
 {
-	unsigned int backlight_16_bit = (level * 0x10101) >> 8;
-	unsigned int backlight_17_bit = backlight_16_bit +
-				(((backlight_16_bit & 0x80) >> 7) & 1);
+	unsigned int backlight_8_bit = 0;
 	uint32_t rampingBoundary = 0xFFFF;
 	uint32_t s2;
 
+	if (backlight_pwm_u16_16 & 0x10000)
+		// Check for max backlight condition
+		backlight_8_bit = 0xFF;
+	else
+		// Take MSB of fractional part since backlight is not max
+		backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
 	/* set ramping boundary */
 	REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
 
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level(
 			0, 1, 80000);
 
 	/* setDMCUParam_BL */
-	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
 
 	/* write ramp */
 	if (controller_id == 0)
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level(
 	s2 = REG_READ(BIOS_SCRATCH_2);
 
 	s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
-	level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+	backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
 				ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
-	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+	s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
 	REG_WRITE(BIOS_SCRATCH_2, s2);
 }
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level(
 static void dce_abm_init(struct abm *abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-	unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+	unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
 
 	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
 	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm)
 			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
 }
 
-static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+static unsigned int dce_abm_get_current_backlight(struct abm *abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 	unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
 
-	return (backlight >> 8);
+	/* return backlight in hardware format which is unsigned 17 bits, with
+	 * 1 bit integer and 16 bit fractional
+	 */
+	return backlight;
+}
+
+static unsigned int dce_abm_get_target_backlight(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+	/* return backlight in hardware format which is unsigned 17 bits, with
+	 * 1 bit integer and 16 bit fractional
+	 */
+	return backlight;
 }
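
Since both getters now return the raw unsigned 1.16 hardware value, a reading of 0x10000 means full (1.0) backlight and 0x8000 means half; a caller wanting the legacy 8-bit scale can take bits 15:8, exactly as the BIOS-scratch update in dmcu_set_backlight_level() above does.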
 
 static bool dce_abm_set_level(struct abm *abm, uint32_t level)
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm)
 	return true;
 }
 
-static bool dce_abm_set_backlight_level(
+static bool dce_abm_set_backlight_level_pwm(
 		struct abm *abm,
-		unsigned int backlight_level,
+		unsigned int backlight_pwm_u16_16,
 		unsigned int frame_ramp,
 		unsigned int controller_id,
 		bool use_smooth_brightness)
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level(
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
 	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
-			backlight_level, backlight_level);
+			backlight_pwm_u16_16, backlight_pwm_u16_16);
 
 	/* If DMCU is in reset state, DMCU is uninitialized */
 	if (use_smooth_brightness)
 		dmcu_set_backlight_level(abm_dce,
-				backlight_level,
+				backlight_pwm_u16_16,
 				frame_ramp,
 				controller_id);
 	else
-		driver_set_backlight_level(abm_dce, backlight_level);
+		driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
 
 	return true;
 }
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = {
 	.abm_init = dce_abm_init,
 	.set_abm_level = dce_abm_set_level,
 	.init_backlight = dce_abm_init_backlight,
-	.set_backlight_level = dce_abm_set_backlight_level,
-	.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+	.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
+	.get_current_backlight = dce_abm_get_current_backlight,
+	.get_target_backlight = dce_abm_get_target_backlight,
 	.set_abm_immediate_disable = dce_abm_immediate_disable
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
new file mode 100644
index 0000000..bd22f51
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -0,0 +1,884 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+	container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+	(clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+	clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+	clk_mgr->ctx->logger
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+int dentist_get_divider_from_did(int did)
+{
+	if (did < DENTIST_BASE_DID_1)
+		did = DENTIST_BASE_DID_1;
+	if (did > DENTIST_MAX_DID)
+		did = DENTIST_MAX_DID;
+
+	if (did < DENTIST_BASE_DID_2) {
+		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+							* (did - DENTIST_BASE_DID_1);
+	} else if (did < DENTIST_BASE_DID_3) {
+		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+							* (did - DENTIST_BASE_DID_2);
+	} else if (did < DENTIST_BASE_DID_4) {
+		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+							* (did - DENTIST_BASE_DID_3);
+	} else {
+		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+							* (did - DENTIST_BASE_DID_4);
+	}
+}
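
One property worth noting: out-of-range DIDs are clamped before the range tests, so both boundary assertions below hold (illustrative only; the DENTIST_* constants themselves are defined in dce_clk_mgr.h, which is not part of this hunk):

	ASSERT(dentist_get_divider_from_did(DENTIST_BASE_DID_1 - 1) ==
	       dentist_get_divider_from_did(DENTIST_BASE_DID_1));
	ASSERT(dentist_get_divider_from_did(DENTIST_MAX_DID + 1) ==
	       dentist_get_divider_from_did(DENTIST_MAX_DID));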
+
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases:
+ *
+ * - SS enabled on DP Ref clock and HW de-spreading enabled with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on DP Ref clock and HW de-spreading enabled with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on DP Ref clock and HW de-spreading disabled (should not
+ *   be the case with CIK); then SW should program all generated rates
+ *   according to the average value (as with previous ASICs)
+ */
+static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
+{
+	if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+				dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+							clk_mgr_dce->dprefclk_ss_divider), 200);
+		struct fixed31_32 adj_dp_ref_clk_khz;
+
+		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+	}
+	return dp_ref_clk_khz;
+}
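
In plain arithmetic, the fixed-point math above computes adjusted_khz = dp_ref_clk_khz * (1 - (dprefclk_ss_percentage / dprefclk_ss_divider) / 200). Reading the divide-by-200 as the percent conversion (/100) folded with the halving to the midpoint of a down-spread (/2), which is an interpretation rather than something the source states, a 1% down-spread takes a 600000 kHz reference down to 597000 kHz on average.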
+
+static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	int dprefclk_wdivider;
+	int dprefclk_src_sel;
+	int dp_ref_clk_khz = 600000;
+	int target_div;
+
+	/* ASSERT DP Reference Clock source is from DFS*/
+	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+	ASSERT(dprefclk_src_sel == 0);
+
+	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+	/* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */
+	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+	/* Calculate the current DFS clock, in kHz.*/
+	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+		* clk_mgr_dce->dentist_vco_freq_khz) / target_div;
+
+	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
+}
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+
+	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
+}
+
+/* Unit: kHz. Before mode set, get the pixel clock from the context; the ASIC
+ * registers may not be programmed yet
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+	uint32_t max_pix_clk = 0;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->stream == NULL)
+			continue;
+
+		/* do not check underlay */
+		if (pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+		/* Raise clock state for HBR3/2 if required. Confirmed with HW team:
+		 * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail
+		 */
+		if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+				pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+	}
+
+	return max_pix_clk;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+	struct clk_mgr *clk_mgr,
+	struct dc_state *context)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	int i;
+	enum dm_pp_clocks_state low_req_clk;
+	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+	/* Iterate from highest supported to lowest valid state, and update
+	 * lowest RequiredState with the lowest state that satisfies
+	 * all required clocks
+	 */
+	for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+		if (context->bw.dce.dispclk_khz >
+				clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+			|| max_pix_clk >
+				clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+			break;
+
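+	/* The descending loop stops at the first state that fails, so i + 1
+	 * is the lowest state that still satisfies all required clocks
+	 * (ULTRA_LOW if every state satisfies them), assuming the per-state
+	 * limits grow with the state index.
+	 */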
+	low_req_clk = i + 1;
+	if (low_req_clk > clk_mgr_dce->max_clks_state) {
+		/* Use the max clock state for a high phyclk; report invalid
+		 * if even that cannot cover the display clock.
+		 */
+		if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+				< context->bw.dce.dispclk_khz)
+			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+		else
+			low_req_clk = clk_mgr_dce->max_clks_state;
+	}
+
+	return low_req_clk;
+}
+
+static int dce_set_clock(
+	struct clk_mgr *clk_mgr,
+	int requested_clk_khz)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+	int actual_clock = requested_clk_khz;
+	struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+	/* Make sure requested clock isn't lower than minimum threshold*/
+	if (requested_clk_khz > 0)
+		requested_clk_khz = max(requested_clk_khz,
+				clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+	/* Prepare to program display clock*/
+	pxl_clk_params.target_pixel_clock = requested_clk_khz;
+	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+	if (clk_mgr_dce->dfs_bypass_active)
+		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+	if (clk_mgr_dce->dfs_bypass_active) {
+		/* Cache the fixed display clock*/
+		clk_mgr_dce->dfs_bypass_disp_clk =
+			pxl_clk_params.dfs_bypass_display_clock;
+		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+	}
+
+	/* Coming out of power down (HW reset) we need to mark the clock state
+	 * as ClocksStateNominal so that on resume the pplib voltage regulator
+	 * is called.
+	 */
+	if (requested_clk_khz == 0)
+		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+	if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+		dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+	return actual_clock;
+}
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct bp_set_dce_clock_parameters dce_clk_params;
+	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+	struct dc *core_dc = clk_mgr->ctx->dc;
+	struct dmcu *dmcu = core_dc->res_pool->dmcu;
+	int actual_clock = requested_clk_khz;
+	/* Prepare to program display clock*/
+	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+	/* Make sure requested clock isn't lower than minimum threshold*/
+	if (requested_clk_khz > 0)
+		requested_clk_khz = max(requested_clk_khz,
+				clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+	dce_clk_params.target_clock_frequency = requested_clk_khz;
+	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+	bp->funcs->set_dce_clock(bp, &dce_clk_params);
+	actual_clock = dce_clk_params.target_clock_frequency;
+
+	/* Coming out of power down (HW reset) we need to mark the clock state
+	 * as ClocksStateNominal so that on resume the pplib voltage regulator
+	 * is called.
+	 */
+	if (requested_clk_khz == 0)
+		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+	/* Program the DP ref clock. VBIOS will determine the DPREFCLK
+	 * frequency, so we don't set it.
+	 */
+	dce_clk_params.target_clock_frequency = 0;
+	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+	if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+			(dce_clk_params.pll_id ==
+					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+	else
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+	bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+				dmcu->funcs->set_psr_wait_loop(dmcu,
+						actual_clock / 1000 / 7);
+		}
+	}
+
+	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+	return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+	struct integrated_info info = { { { 0 } } };
+	struct dc_firmware_info fw_info = { { 0 } };
+	int i;
+
+	if (bp->integrated_info)
+		info = *bp->integrated_info;
+
+	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_mgr_dce->dentist_vco_freq_khz =
+			fw_info.smu_gpu_pll_output_freq;
+		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+	}
+
+	/*update the maximum display clock for each power state*/
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+		switch (i) {
+		case 0:
+			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+			break;
+
+		case 1:
+			clk_state = DM_PP_CLOCKS_STATE_LOW;
+			break;
+
+		case 2:
+			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+			break;
+
+		case 3:
+			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+			break;
+
+		default:
+			clk_state = DM_PP_CLOCKS_STATE_INVALID;
+			break;
+		}
+
+		/* Do not allow a bad VBIOS/SBIOS to override with invalid
+		 * values; only accept clocks above 100 MHz.
+		 */
+		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+				info.disp_clk_voltage[i].max_supported_clk;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+	int ss_info_num = bp->funcs->get_ss_entry_number(
+			bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+	if (ss_info_num) {
+		struct spread_spectrum_info info = { { 0 } };
+		enum bp_result result = bp->funcs->get_spread_spectrum_info(
+				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+		/* The VBIOS keeps an entry for GPU PLL SS even when SS is not
+		 * enabled; a non-zero SSInfo.spreadSpectrumPercentage is the
+		 * sign that SS really is enabled.
+		 */
+		if (result == BP_RESULT_OK &&
+				info.spread_spectrum_percentage != 0) {
+			clk_mgr_dce->ss_on_dprefclk = true;
+			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+			if (info.type.CENTER_MODE == 0) {
+				/* TODO: Currently for DP Reference clock we
+				 * need only SS percentage for
+				 * downspread */
+				clk_mgr_dce->dprefclk_ss_percentage =
+						info.spread_spectrum_percentage;
+			}
+
+			return;
+		}
+
+		result = bp->funcs->get_spread_spectrum_info(
+				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+		/* The VBIOS keeps an entry for DPREFCLK SS even when SS is not
+		 * enabled; a non-zero SSInfo.spreadSpectrumPercentage is the
+		 * sign that SS really is enabled.
+		 */
+		if (result == BP_RESULT_OK &&
+				info.spread_spectrum_percentage != 0) {
+			clk_mgr_dce->ss_on_dprefclk = true;
+			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+			if (info.type.CENTER_MODE == 0) {
+				/* Currently for DP Reference clock we
+				 * need only SS percentage for
+				 * downspread */
+				clk_mgr_dce->dprefclk_ss_percentage =
+						info.spread_spectrum_percentage;
+			}
+		}
+	}
+}
+
+void dce110_fill_display_configs(
+	const struct dc_state *context,
+	struct dm_pp_display_configuration *pp_display_cfg)
+{
+	int j;
+	int num_cfgs = 0;
+
+	for (j = 0; j < context->stream_count; j++) {
+		int k;
+
+		const struct dc_stream_state *stream = context->streams[j];
+		struct dm_pp_single_disp_config *cfg =
+			&pp_display_cfg->disp_configs[num_cfgs];
+		const struct pipe_ctx *pipe_ctx = NULL;
+
+		for (k = 0; k < MAX_PIPES; k++)
+			if (stream == context->res_ctx.pipe_ctx[k].stream) {
+				pipe_ctx = &context->res_ctx.pipe_ctx[k];
+				break;
+			}
+
+		ASSERT(pipe_ctx != NULL);
+
+		/* only notify active streams */
+		if (stream->dpms_off)
+			continue;
+
+		num_cfgs++;
+		cfg->signal = pipe_ctx->stream->signal;
+		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+		cfg->src_height = stream->src.height;
+		cfg->src_width = stream->src.width;
+		cfg->ddi_channel_mapping =
+			stream->sink->link->ddi_channel_mapping.raw;
+		cfg->transmitter =
+			stream->sink->link->link_enc->transmitter;
+		cfg->link_settings.lane_count =
+			stream->sink->link->cur_link_settings.lane_count;
+		cfg->link_settings.link_rate =
+			stream->sink->link->cur_link_settings.link_rate;
+		cfg->link_settings.link_spread =
+			stream->sink->link->cur_link_settings.link_spread;
+		cfg->sym_clock = stream->phy_pix_clk;
+		/* Compute v_refresh in Hz, rounded to the nearest integer */
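+		/* e.g. (hypothetical CEA 1080p60 timing): 148500 kHz * 1000 /
+		 * 2200 = 67500; (67500 + 1125 / 2) / 1125 = 60 Hz.
+		 */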
+		cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+		cfg->v_refresh /= stream->timing.h_total;
+		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+							/ stream->timing.v_total;
+	}
+
+	pp_display_cfg->display_count = num_cfgs;
+}
+
+static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+	uint8_t j;
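+	/* -1 wraps to UINT32_MAX, so the first stream always lowers it */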
+	uint32_t min_vertical_blank_time = -1;
+
+	for (j = 0; j < context->stream_count; j++) {
+		struct dc_stream_state *stream = context->streams[j];
+		uint32_t vertical_blank_in_pixels = 0;
+		uint32_t vertical_blank_time = 0;
+
+		vertical_blank_in_pixels = stream->timing.h_total *
+			(stream->timing.v_total
+			 - stream->timing.v_addressable);
+
+		vertical_blank_time = vertical_blank_in_pixels
+			* 1000 / stream->timing.pix_clk_khz;
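+		/* e.g. (hypothetical 1080p60): 2200 * (1125 - 1080) = 99000
+		 * blanking pixels -> 99000 * 1000 / 148500 = 666 us.
+		 */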
+
+		if (min_vertical_blank_time > vertical_blank_time)
+			min_vertical_blank_time = vertical_blank_time;
+	}
+
+	return min_vertical_blank_time;
+}
+
+static int determine_sclk_from_bounding_box(
+		const struct dc *dc,
+		int required_sclk)
+{
+	int i;
+
+	/*
+	 * Some asics do not give us sclk levels, so we just report the actual
+	 * required sclk
+	 */
+	if (dc->sclk_lvls.num_levels == 0)
+		return required_sclk;
+
+	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+			return dc->sclk_lvls.clocks_in_khz[i];
+	}
+	/*
+	 * Even the maximum level cannot satisfy the requirement; this is
+	 * unexpected at this stage and should have been caught at
+	 * validation time.
+	 */
+	ASSERT(0);
+	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void dce_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->all_displays_in_sync =
+		context->bw.dce.all_displays_in_sync;
+	pp_display_cfg->nb_pstate_switch_disable =
+			context->bw.dce.nbp_state_change_enable == false;
+	pp_display_cfg->cpu_cc6_disable =
+			context->bw.dce.cpuc_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_disable =
+			context->bw.dce.cpup_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_separation_time =
+			context->bw.dce.blackout_recovery_time_us;
+
+	pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+		/ MEMORY_TYPE_MULTIPLIER_CZ;
+
+	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+			dc,
+			context->bw.dce.sclk_khz);
+
+	pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
+	pp_display_cfg->min_engine_clock_deep_sleep_khz
+			= context->bw.dce.sclk_deep_sleep_khz;
+
+	pp_display_cfg->avail_mclk_switch_time_us =
+						dce110_get_min_vblank_time_us(context);
+	/* TODO: dce11.2*/
+	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	/* TODO: is this still applicable?*/
+	if (pp_display_cfg->display_count == 1) {
+		const struct dc_crtc_timing *timing =
+			&context->streams[0]->timing;
+
+		pp_display_cfg->crtc_index =
+			pp_display_cfg->disp_configs[0].pipe_idx;
+		pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+	}
+
+	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+	int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: workaround for DAL3 Linux; investigate why this works */
+	if (!clk_mgr_dce->dfs_bypass_active)
+		context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* notify PPLIB when the required minimum clock state changes */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+	context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* notify PPLIB when the required minimum clock state changes */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_power_level_change_request level_change_req;
+
+	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+	/* notify PPLIB when the required minimum clock state changes */
+	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+	}
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+	int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: workaround for DAL3 Linux; investigate why this works */
+	if (!clk_mgr_dce->dfs_bypass_active)
+		context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+	if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+		clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+		context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+
+		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+	}
+
+	if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
+		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+		clock_voltage_req.clocks_in_khz = max_pix_clk;
+		clk_mgr->clks.phyclk_khz = max_pix_clk;
+
+		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+	}
+	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+	context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static const struct clk_mgr_funcs dce120_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = dce12_update_clocks
+};
+
+static const struct clk_mgr_funcs dce112_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce112_update_clocks
+};
+
+static const struct clk_mgr_funcs dce110_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce11_update_clocks,
+};
+
+static const struct clk_mgr_funcs dce_funcs = {
+	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+	.update_clocks = dce_update_clocks
+};
+
+static void dce_clk_mgr_construct(
+	struct dce_clk_mgr *clk_mgr_dce,
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct clk_mgr *base = &clk_mgr_dce->base;
+	struct dm_pp_static_clock_info static_clk_info = {0};
+
+	base->ctx = ctx;
+	base->funcs = &dce_funcs;
+
+	clk_mgr_dce->regs = regs;
+	clk_mgr_dce->clk_mgr_shift = clk_shift;
+	clk_mgr_dce->clk_mgr_mask = clk_mask;
+
+	clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+	clk_mgr_dce->dprefclk_ss_percentage = 0;
+	clk_mgr_dce->dprefclk_ss_divider = 1000;
+	clk_mgr_dce->ss_on_dprefclk = false;
+
+	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+		clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
+	else
+		clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+	clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+	dce_clock_read_integrated_info(clk_mgr_dce);
+	dce_clock_read_ss_info(clk_mgr_dce);
+}
+
+struct clk_mgr *dce_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce80_max_clks_by_state,
+		sizeof(dce80_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce110_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce110_max_clks_by_state,
+		sizeof(dce110_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	clk_mgr_dce->base.funcs = &dce110_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce112_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce112_max_clks_by_state,
+		sizeof(dce112_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+	clk_mgr_dce->base.funcs = &dce112_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
+{
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	memcpy(clk_mgr_dce->max_clks_by_state,
+		dce120_max_clks_by_state,
+		sizeof(dce120_max_clks_by_state));
+
+	dce_clk_mgr_construct(
+		clk_mgr_dce, ctx, NULL, NULL, NULL);
+
+	clk_mgr_dce->dprefclk_khz = 600000;
+	clk_mgr_dce->base.funcs = &dce120_funcs;
+
+	return &clk_mgr_dce->base;
+}
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
+{
+	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
+
+	kfree(clk_mgr_dce);
+	*clk_mgr = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
new file mode 100644
index 0000000..3bceb31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
+
+#include "clk_mgr.h"
+#include "dccg.h"
+
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
+
+#define CLK_COMMON_REG_LIST_DCE_BASE() \
+	.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+	.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+	SR(DENTIST_DISPCLK_CNTL)
+
+#define CLK_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
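+/* e.g. with mask_sh = _MASK, CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, _MASK)
+ * expands to .DPREFCLK_SRC_SEL = DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK
+ */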
+
+#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+	CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
+#define CLK_REG_FIELD_LIST(type) \
+	type DPREFCLK_SRC_SEL; \
+	type DENTIST_DPREFCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_WDIVIDER; \
+	type DENTIST_DISPCLK_CHG_DONE;
+
+struct clk_mgr_shift {
+	CLK_REG_FIELD_LIST(uint8_t)
+};
+
+struct clk_mgr_mask {
+	CLK_REG_FIELD_LIST(uint32_t)
+};
+
+struct clk_mgr_registers {
+	uint32_t DPREFCLK_CNTL;
+	uint32_t DENTIST_DISPCLK_CNTL;
+};
+
+struct state_dependent_clocks {
+	int display_clk_khz;
+	int pixel_clk_khz;
+};
+
+struct dce_clk_mgr {
+	struct clk_mgr base;
+	const struct clk_mgr_registers *regs;
+	const struct clk_mgr_shift *clk_mgr_shift;
+	const struct clk_mgr_mask *clk_mgr_mask;
+
+	struct dccg *dccg;
+
+	struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+
+	int dentist_vco_freq_khz;
+
+	/* Cache the status of DFS-bypass feature*/
+	bool dfs_bypass_enabled;
+	/* True if the DFS-bypass feature is enabled and active. */
+	bool dfs_bypass_active;
+	/* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+	 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+	int dfs_bypass_disp_clk;
+
+	/* Flag for Enabled SS on DPREFCLK */
+	bool ss_on_dprefclk;
+	/* DPREFCLK SS percentage (if down-spread enabled) */
+	int dprefclk_ss_percentage;
+	/* DPREFCLK SS percentage Divider (100 or 1000) */
+	int dprefclk_ss_divider;
+	int dprefclk_khz;
+
+	enum dm_pp_clocks_state max_clks_state;
+	enum dm_pp_clocks_state cur_min_clks_state;
+};
+
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+	DENTIST_BASE_DID_1 = 0x08,
+	DENTIST_BASE_DID_2 = 0x40,
+	DENTIST_BASE_DID_3 = 0x60,
+	DENTIST_BASE_DID_4 = 0x7e,
+	DENTIST_MAX_DID = 0x7f
+};
+
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+	DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
+	DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
+	DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
+	DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
+	DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+	DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
+	DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+	DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
+	DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
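+
+/* A DID maps onto a divider in these scaled units; e.g. (hypothetical) DID
+ * 0x41 falls in range 2: 64 + 2 * (0x41 - 0x40) = 66, a divide ratio of
+ * 66 / 4 = 16.5.
+ */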
+
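+/* A sketch of the policy below (hypothetical numbers):
+ * should_set_clock(false, 300000, 400000) is false -- lowering waits for
+ * safe_to_lower, while any raise (calc_clk > cur_clk) commits immediately.
+ */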
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
+
+void dce110_fill_display_configs(
+	const struct dc_state *context,
+	struct dm_pp_display_configuration *pp_display_cfg);
+
+int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
+
+struct clk_mgr *dce_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce110_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce112_clk_mgr_create(
+	struct dc_context *ctx,
+	const struct clk_mgr_registers *regs,
+	const struct clk_mgr_shift *clk_shift,
+	const struct clk_mgr_mask *clk_mask);
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
+
+int dentist_get_divider_from_did(int did);
+
+#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
deleted file mode 100644
index d89a097..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce_clocks.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-#include "bios_parser_interface.h"
-#include "dc.h"
-#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn_calcs.h"
-#endif
-#include "core_types.h"
-#include "dc_types.h"
-#include "dal_asic_id.h"
-
-#define TO_DCE_CLOCKS(clocks)\
-	container_of(clocks, struct dce_dccg, base)
-
-#define REG(reg) \
-	(clk_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
-	clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
-
-#define CTX \
-	clk_dce->base.ctx
-#define DC_LOGGER \
-	clk->ctx->logger
-
-/* Max clock values for each state indexed by "enum clocks_state": */
-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
-/* ClocksStateInvalid - should not be used */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateLow */
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
-/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
-/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
-
-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-
-/* Starting DID for each range */
-enum dentist_base_divider_id {
-	DENTIST_BASE_DID_1 = 0x08,
-	DENTIST_BASE_DID_2 = 0x40,
-	DENTIST_BASE_DID_3 = 0x60,
-	DENTIST_BASE_DID_4 = 0x7e,
-	DENTIST_MAX_DID = 0x7f
-};
-
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
-	DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
-	DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
-	DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
-	DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
-	DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
-	DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
-	DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
-	DENTIST_DIVIDER_RANGE_4_STEP  = 264, /* 66.00 */
-	DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
-
-static int dentist_get_divider_from_did(int did)
-{
-	if (did < DENTIST_BASE_DID_1)
-		did = DENTIST_BASE_DID_1;
-	if (did > DENTIST_MAX_DID)
-		did = DENTIST_MAX_DID;
-
-	if (did < DENTIST_BASE_DID_2) {
-		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
-							* (did - DENTIST_BASE_DID_1);
-	} else if (did < DENTIST_BASE_DID_3) {
-		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
-							* (did - DENTIST_BASE_DID_2);
-	} else if (did < DENTIST_BASE_DID_4) {
-		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
-							* (did - DENTIST_BASE_DID_3);
-	} else {
-		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
-							* (did - DENTIST_BASE_DID_4);
-	}
-}
-
-/* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
-  */
-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
-{
-	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
-				dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
-							clk_dce->dprefclk_ss_divider), 200);
-		struct fixed31_32 adj_dp_ref_clk_khz;
-
-		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
-		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
-		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
-	}
-	return dp_ref_clk_khz;
-}
-
-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	int dprefclk_wdivider;
-	int dprefclk_src_sel;
-	int dp_ref_clk_khz = 600000;
-	int target_div;
-
-	/* ASSERT DP Reference Clock source is from DFS*/
-	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
-	ASSERT(dprefclk_src_sel == 0);
-
-	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
-	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
-	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
-	/* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
-	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
-	/* Calculate the current DFS clock, in kHz.*/
-	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-		* clk_dce->dentist_vco_freq_khz) / target_div;
-
-	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
-}
-
-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-
-	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
-}
-
-static enum dm_pp_clocks_state dce_get_required_clocks_state(
-	struct dccg *clk,
-	struct dc_clocks *req_clocks)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	int i;
-	enum dm_pp_clocks_state low_req_clk;
-
-	/* Iterate from highest supported to lowest valid state, and update
-	 * lowest RequiredState with the lowest state that satisfies
-	 * all required clocks
-	 */
-	for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
-		if (req_clocks->dispclk_khz >
-				clk_dce->max_clks_by_state[i].display_clk_khz
-			|| req_clocks->phyclk_khz >
-				clk_dce->max_clks_by_state[i].pixel_clk_khz)
-			break;
-
-	low_req_clk = i + 1;
-	if (low_req_clk > clk->max_clks_state) {
-		/* set max clock state for high phyclock, invalid on exceeding display clock */
-		if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
-				< req_clocks->dispclk_khz)
-			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
-		else
-			low_req_clk = clk->max_clks_state;
-	}
-
-	return low_req_clk;
-}
-
-static int dce_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
-	struct dc_bios *bp = clk->ctx->dc_bios;
-	int actual_clock = requested_clk_khz;
-
-	/* Make sure requested clock isn't lower than minimum threshold*/
-	if (requested_clk_khz > 0)
-		requested_clk_khz = max(requested_clk_khz,
-				clk_dce->dentist_vco_freq_khz / 64);
-
-	/* Prepare to program display clock*/
-	pxl_clk_params.target_pixel_clock = requested_clk_khz;
-	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-
-	if (clk_dce->dfs_bypass_active)
-		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
-
-	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
-
-	if (clk_dce->dfs_bypass_active) {
-		/* Cache the fixed display clock*/
-		clk_dce->dfs_bypass_disp_clk =
-			pxl_clk_params.dfs_bypass_display_clock;
-		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
-	}
-
-	/* from power down, we need mark the clock state as ClocksStateNominal
-	 * from HWReset, so when resume we will call pplib voltage regulator.*/
-	if (requested_clk_khz == 0)
-		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-	return actual_clock;
-}
-
-static int dce_psr_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct dc_context *ctx = clk_dce->base.ctx;
-	struct dc *core_dc = ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
-	int actual_clk_khz = requested_clk_khz;
-
-	actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
-
-	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
-	return actual_clk_khz;
-}
-
-static int dce112_set_clock(
-	struct dccg *clk,
-	int requested_clk_khz)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-	struct bp_set_dce_clock_parameters dce_clk_params;
-	struct dc_bios *bp = clk->ctx->dc_bios;
-	struct dc *core_dc = clk->ctx->dc;
-	struct dmcu *dmcu = core_dc->res_pool->dmcu;
-	int actual_clock = requested_clk_khz;
-	/* Prepare to program display clock*/
-	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-
-	/* Make sure requested clock isn't lower than minimum threshold*/
-	if (requested_clk_khz > 0)
-		requested_clk_khz = max(requested_clk_khz,
-				clk_dce->dentist_vco_freq_khz / 62);
-
-	dce_clk_params.target_clock_frequency = requested_clk_khz;
-	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-
-	bp->funcs->set_dce_clock(bp, &dce_clk_params);
-	actual_clock = dce_clk_params.target_clock_frequency;
-
-	/* from power down, we need mark the clock state as ClocksStateNominal
-	 * from HWReset, so when resume we will call pplib voltage regulator.*/
-	if (requested_clk_khz == 0)
-		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-
-	/*Program DP ref Clock*/
-	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
-	dce_clk_params.target_clock_frequency = 0;
-	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
-	if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
-		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
-			(dce_clk_params.pll_id ==
-					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
-	else
-		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-
-	bp->funcs->set_dce_clock(bp, &dce_clk_params);
-
-	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
-		if (clk_dce->dfs_bypass_disp_clk != actual_clock)
-			dmcu->funcs->set_psr_wait_loop(dmcu,
-					actual_clock / 1000 / 7);
-	}
-
-	clk_dce->dfs_bypass_disp_clk = actual_clock;
-	return actual_clock;
-}
-
-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
-{
-	struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
-	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-	struct integrated_info info = { { { 0 } } };
-	struct dc_firmware_info fw_info = { { 0 } };
-	int i;
-
-	if (bp->integrated_info)
-		info = *bp->integrated_info;
-
-	clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
-	if (clk_dce->dentist_vco_freq_khz == 0) {
-		bp->funcs->get_firmware_info(bp, &fw_info);
-		clk_dce->dentist_vco_freq_khz =
-			fw_info.smu_gpu_pll_output_freq;
-		if (clk_dce->dentist_vco_freq_khz == 0)
-			clk_dce->dentist_vco_freq_khz = 3600000;
-	}
-
-	/*update the maximum display clock for each power state*/
-	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
-		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
-
-		switch (i) {
-		case 0:
-			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
-			break;
-
-		case 1:
-			clk_state = DM_PP_CLOCKS_STATE_LOW;
-			break;
-
-		case 2:
-			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
-			break;
-
-		case 3:
-			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
-			break;
-
-		default:
-			clk_state = DM_PP_CLOCKS_STATE_INVALID;
-			break;
-		}
-
-		/*Do not allow bad VBIOS/SBIOS to override with invalid values,
-		 * check for > 100MHz*/
-		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
-			clk_dce->max_clks_by_state[clk_state].display_clk_khz =
-				info.disp_clk_voltage[i].max_supported_clk;
-	}
-
-	if (!debug->disable_dfs_bypass && bp->integrated_info)
-		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-			clk_dce->dfs_bypass_enabled = true;
-}
-
-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
-{
-	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
-	int ss_info_num = bp->funcs->get_ss_entry_number(
-			bp, AS_SIGNAL_TYPE_GPU_PLL);
-
-	if (ss_info_num) {
-		struct spread_spectrum_info info = { { 0 } };
-		enum bp_result result = bp->funcs->get_spread_spectrum_info(
-				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
-
-		/* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
-		 * even if SS not enabled and in that case
-		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
-		 * that SS is enabled
-		 */
-		if (result == BP_RESULT_OK &&
-				info.spread_spectrum_percentage != 0) {
-			clk_dce->ss_on_dprefclk = true;
-			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-			if (info.type.CENTER_MODE == 0) {
-				/* TODO: Currently for DP Reference clock we
-				 * need only SS percentage for
-				 * downspread */
-				clk_dce->dprefclk_ss_percentage =
-						info.spread_spectrum_percentage;
-			}
-
-			return;
-		}
-
-		result = bp->funcs->get_spread_spectrum_info(
-				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
-
-		/* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
-		 * even if SS not enabled and in that case
-		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
-		 * that SS is enabled
-		 */
-		if (result == BP_RESULT_OK &&
-				info.spread_spectrum_percentage != 0) {
-			clk_dce->ss_on_dprefclk = true;
-			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
-			if (info.type.CENTER_MODE == 0) {
-				/* Currently for DP Reference clock we
-				 * need only SS percentage for
-				 * downspread */
-				clk_dce->dprefclk_ss_percentage =
-						info.spread_spectrum_percentage;
-			}
-		}
-	}
-}
-
-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
-{
-	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static void dce12_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-
-	/* TODO: Investigate why this is needed to fix display corruption. */
-	new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
-		new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
-		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-	}
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-	bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
-	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
-	bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
-
-	/* increase clock, looking for div is 0 for current, request div is 1*/
-	if (dispclk_increase) {
-		/* already divided by 2, no need to reach target clk with 2 steps*/
-		if (cur_dpp_div)
-			return new_clocks->dispclk_khz;
-
-		/* request disp clk is lower than maximum supported dpp clk,
-		 * no need to reach target clk with two steps.
-		 */
-		if (new_clocks->dispclk_khz <= disp_clk_threshold)
-			return new_clocks->dispclk_khz;
-
-		/* target dpp clk not request divided by 2, still within threshold */
-		if (!request_dpp_div)
-			return new_clocks->dispclk_khz;
-
-	} else {
-		/* decrease clock, looking for current dppclk divided by 2,
-		 * request dppclk not divided by 2.
-		 */
-
-		/* current dpp clk not divided by 2, no need to ramp*/
-		if (!cur_dpp_div)
-			return new_clocks->dispclk_khz;
-
-		/* current disp clk is lower than current maximum dpp clk,
-		 * no need to ramp
-		 */
-		if (dccg->clks.dispclk_khz <= disp_clk_threshold)
-			return new_clocks->dispclk_khz;
-
-		/* request dpp clk need to be divided by 2 */
-		if (request_dpp_div)
-			return new_clocks->dispclk_khz;
-	}
-
-	return disp_clk_threshold;
-}
-
-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
-	struct dc *dc = dccg->ctx->dc;
-	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
-	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
-	int i;
-
-	/* set disp clk to dpp clk threshold */
-	dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
-
-	/* update request dpp clk division option */
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->plane_state)
-			continue;
-
-		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
-				pipe_ctx->plane_res.dpp,
-				request_dpp_div,
-				true);
-	}
-
-	/* If target clk not same as dppclk threshold, set to target clock */
-	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
-		dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-
-	dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-	dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
-	dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static void dcn1_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dc *dc = dccg->ctx->dc;
-	struct pp_smu_display_requirement_rv *smu_req_cur =
-			&dc->res_pool->pp_smu_req;
-	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
-	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-	bool send_request_to_increase = false;
-	bool send_request_to_lower = false;
-
-	if (new_clocks->phyclk_khz)
-		smu_req.display_count = 1;
-	else
-		smu_req.display_count = 0;
-
-	if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
-			|| new_clocks->phyclk_khz > dccg->clks.phyclk_khz
-			|| new_clocks->fclk_khz > dccg->clks.fclk_khz
-			|| new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
-		send_request_to_increase = true;
-
-	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
-		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
-		dccg->clks.fclk_khz = new_clocks->fclk_khz;
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-		clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
-		smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
-
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
-		dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
-		smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (should_set_clock(safe_to_lower,
-			new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
-		dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
-		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
-
-		send_request_to_lower = true;
-	}
-
-	/* make sure dcf clk is before dpp clk to
-	 * make sure we have enough voltage to run dpp clk
-	 */
-	if (send_request_to_increase) {
-		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-	}
-
-	/* dcn1 dppclk is tied to dispclk */
-	/* program dispclk on = as a w/a for sleep resume clock ramping issues */
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
-			|| new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
-		dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
-		send_request_to_lower = true;
-	}
-
-	if (!send_request_to_increase && send_request_to_lower) {
-		/*use dcfclk to request voltage*/
-		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
-		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
-		if (pp_smu->set_display_requirement)
-			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
-	}
-
-
-	*smu_req_cur = smu_req;
-}
-#endif
-
-static void dce_update_clocks(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
-			bool safe_to_lower)
-{
-	struct dm_pp_power_level_change_request level_change_req;
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-
-	/* TODO: Investigate why this is needed to fix display corruption. */
-	if (!clk_dce->dfs_bypass_active)
-		new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
-	level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
-	/* get max clock state from PPLIB */
-	if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
-			|| level_change_req.power_level > dccg->cur_min_clks_state) {
-		if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
-			dccg->cur_min_clks_state = level_change_req.power_level;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-		new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-	}
-}
-
-static bool dce_update_dfs_bypass(
-       struct dccg *dccg,
-       struct dc *dc,
-       struct dc_state *context,
-       int requested_clock_khz)
-{
-       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-       struct resource_context *res_ctx = &context->res_ctx;
-       enum signal_type signal_type = SIGNAL_TYPE_NONE;
-       bool was_active = clk_dce->dfs_bypass_active;
-       int i;
-
-       /* Disable DFS bypass by default. */
-       clk_dce->dfs_bypass_active = false;
-
-       /* Check that DFS bypass is available. */
-       if (!clk_dce->dfs_bypass_enabled)
-               goto update;
-
-       /* Check if the requested display clock is below the threshold. */
-       if (requested_clock_khz >= 400000)
-               goto update;
-
-       /* DFS-bypass should only be enabled on single stream setups */
-       if (context->stream_count != 1)
-               goto update;
-
-       /* Check that the stream's signal type is an embedded panel */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (res_ctx->pipe_ctx[i].stream) {
-                       struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
-                       signal_type = pipe_ctx->stream->sink->link->connector_signal;
-                       break;
-               }
-       }
-
-       if (signal_type == SIGNAL_TYPE_EDP ||
-               signal_type == SIGNAL_TYPE_LVDS)
-               clk_dce->dfs_bypass_active = true;
-
-update:
-       /* Update the clock state. We don't need to respect safe_to_lower
-        * because DFS bypass should always be greater than the current
-        * display clock frequency.
-        */
-       if (was_active != clk_dce->dfs_bypass_active) {
-               dccg->clks.dispclk_khz =
-                       dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
-               return true;
-       }
-
-       return false;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static const struct display_clock_funcs dcn1_funcs = {
-	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dcn1_update_clocks
-};
-#endif
-
-static const struct display_clock_funcs dce120_funcs = {
-	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dce12_update_clocks
-};
-
-static const struct display_clock_funcs dce112_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce112_set_clock,
-	.update_clocks = dce_update_clocks
-};
-
-static const struct display_clock_funcs dce110_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce_psr_set_clock,
-	.update_clocks = dce_update_clocks,
-	.update_dfs_bypass = dce_update_dfs_bypass
-};
-
-static const struct display_clock_funcs dce_funcs = {
-	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
-	.set_dispclk = dce_set_clock,
-	.update_clocks = dce_update_clocks
-};
-
-static void dce_dccg_construct(
-	struct dce_dccg *clk_dce,
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dccg *base = &clk_dce->base;
-
-	base->ctx = ctx;
-	base->funcs = &dce_funcs;
-
-	clk_dce->regs = regs;
-	clk_dce->clk_shift = clk_shift;
-	clk_dce->clk_mask = clk_mask;
-
-	clk_dce->dfs_bypass_disp_clk = 0;
-
-	clk_dce->dprefclk_ss_percentage = 0;
-	clk_dce->dprefclk_ss_divider = 1000;
-	clk_dce->ss_on_dprefclk = false;
-
-	base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-	base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
-
-	dce_clock_read_integrated_info(clk_dce);
-	dce_clock_read_ss_info(clk_dce);
-}
-
-struct dccg *dce_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce80_max_clks_by_state,
-		sizeof(dce80_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce110_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce110_max_clks_by_state,
-		sizeof(dce110_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	clk_dce->base.funcs = &dce110_funcs;
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce112_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce112_max_clks_by_state,
-		sizeof(dce112_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, regs, clk_shift, clk_mask);
-
-	clk_dce->base.funcs = &dce112_funcs;
-
-	return &clk_dce->base;
-}
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx)
-{
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	memcpy(clk_dce->max_clks_by_state,
-		dce120_max_clks_by_state,
-		sizeof(dce120_max_clks_by_state));
-
-	dce_dccg_construct(
-		clk_dce, ctx, NULL, NULL, NULL);
-
-	clk_dce->dprefclk_khz = 600000;
-	clk_dce->base.funcs = &dce120_funcs;
-
-	return &clk_dce->base;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
-{
-	struct dc_debug_options *debug = &ctx->dc->debug;
-	struct dc_bios *bp = ctx->dc_bios;
-	struct dc_firmware_info fw_info = { { 0 } };
-	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
-	if (clk_dce == NULL) {
-		BREAK_TO_DEBUGGER();
-		return NULL;
-	}
-
-	clk_dce->base.ctx = ctx;
-	clk_dce->base.funcs = &dcn1_funcs;
-
-	clk_dce->dfs_bypass_disp_clk = 0;
-
-	clk_dce->dprefclk_ss_percentage = 0;
-	clk_dce->dprefclk_ss_divider = 1000;
-	clk_dce->ss_on_dprefclk = false;
-
-	clk_dce->dprefclk_khz = 600000;
-	if (bp->integrated_info)
-		clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
-	if (clk_dce->dentist_vco_freq_khz == 0) {
-		bp->funcs->get_firmware_info(bp, &fw_info);
-		clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
-		if (clk_dce->dentist_vco_freq_khz == 0)
-			clk_dce->dentist_vco_freq_khz = 3600000;
-	}
-
-	if (!debug->disable_dfs_bypass && bp->integrated_info)
-		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
-			clk_dce->dfs_bypass_enabled = true;
-
-	dce_clock_read_ss_info(clk_dce);
-
-	return &clk_dce->base;
-}
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg)
-{
-	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
-
-	kfree(clk_dce);
-	*dccg = NULL;
-}
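
The dce_update_dfs_bypass() logic deleted above comes down to two rules:
activate DFS bypass only for embedded panels (eDP/LVDS), and touch the
DISPCLK divider only when the active state actually toggles - which is
also why its comment says safe_to_lower can be ignored. A minimal
standalone sketch of that decision; dfs_ctx and set_dispclk_khz are
simplified stand-ins, not the DC API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-ins for the DC types; not the real driver API. */
	enum signal_type { SIGNAL_EDP, SIGNAL_LVDS, SIGNAL_DP, SIGNAL_HDMI };

	struct dfs_ctx {
		bool bypass_enabled;	/* feature fused on and allowed */
		bool bypass_active;	/* bypass currently driving DISPCLK */
		int dispclk_khz;
	};

	static int set_dispclk_khz(int khz)
	{
		/* Placeholder for the real divider programming. */
		printf("reprogramming DISPCLK to %d kHz\n", khz);
		return khz;
	}

	/* Returns true when DISPCLK had to be reprogrammed. */
	static bool update_dfs_bypass(struct dfs_ctx *ctx, enum signal_type sig)
	{
		bool was_active = ctx->bypass_active;

		ctx->bypass_active = ctx->bypass_enabled &&
			(sig == SIGNAL_EDP || sig == SIGNAL_LVDS);

		/* Only touch the hardware when the bypass state toggles. */
		if (was_active != ctx->bypass_active) {
			ctx->dispclk_khz = set_dispclk_khz(ctx->dispclk_khz);
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct dfs_ctx ctx = { .bypass_enabled = true, .dispclk_khz = 300000 };

		update_dfs_bypass(&ctx, SIGNAL_EDP);	/* toggles on: reprogram */
		update_dfs_bypass(&ctx, SIGNAL_EDP);	/* no change: skipped */
		return 0;
	}
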
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
deleted file mode 100644
index 34fdb38..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-
-#ifndef _DCE_CLOCKS_H_
-#define _DCE_CLOCKS_H_
-
-#include "display_clock.h"
-
-#define CLK_COMMON_REG_LIST_DCE_BASE() \
-	.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
-	.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
-
-#define CLK_COMMON_REG_LIST_DCN_BASE() \
-	SR(DENTIST_DISPCLK_CNTL)
-
-#define CLK_SF(reg_name, field_name, post_fix)\
-	.field_name = reg_name ## __ ## field_name ## post_fix
-
-#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
-	CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
-
-#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
-
-#define CLK_REG_FIELD_LIST(type) \
-	type DPREFCLK_SRC_SEL; \
-	type DENTIST_DPREFCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_CHG_DONE;
-
-struct dccg_shift {
-	CLK_REG_FIELD_LIST(uint8_t)
-};
-
-struct dccg_mask {
-	CLK_REG_FIELD_LIST(uint32_t)
-};
-
-struct dccg_registers {
-	uint32_t DPREFCLK_CNTL;
-	uint32_t DENTIST_DISPCLK_CNTL;
-};
-
-struct dce_dccg {
-	struct dccg base;
-	const struct dccg_registers *regs;
-	const struct dccg_shift *clk_shift;
-	const struct dccg_mask *clk_mask;
-
-	struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
-
-	int dentist_vco_freq_khz;
-
-	/* Cache the status of DFS-bypass feature*/
-	bool dfs_bypass_enabled;
-	/* True if the DFS-bypass feature is enabled and active. */
-	bool dfs_bypass_active;
-	/* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
-	 * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
-	int dfs_bypass_disp_clk;
-
-	/* Flag for Enabled SS on DPREFCLK */
-	bool ss_on_dprefclk;
-	/* DPREFCLK SS percentage (if down-spread enabled) */
-	int dprefclk_ss_percentage;
-	/* DPREFCLK SS percentage Divider (100 or 1000) */
-	int dprefclk_ss_divider;
-	int dprefclk_khz;
-};
-
-
-struct dccg *dce_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce110_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce112_dccg_create(
-	struct dc_context *ctx,
-	const struct dccg_registers *regs,
-	const struct dccg_shift *clk_shift,
-	const struct dccg_mask *clk_mask);
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx);
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg);
-
-#endif /* _DCE_CLOCKS_H_ */
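
CLK_REG_FIELD_LIST in the header removed above is the DC driver's usual
X-macro trick: one field list is expanded twice, with uint8_t storage for
the shift table and uint32_t for the masks, so the two structs can never
drift apart. A stripped-down illustration of the pattern; the values
below are made up, not the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	/* One field list, expanded with different storage types. */
	#define CLK_REG_FIELD_LIST(type) \
		type DPREFCLK_SRC_SEL; \
		type DENTIST_DISPCLK_WDIVIDER;

	struct clk_shift { CLK_REG_FIELD_LIST(uint8_t) };
	struct clk_mask { CLK_REG_FIELD_LIST(uint32_t) };

	int main(void)
	{
		/* Illustrative shift/mask values, not the real layout. */
		struct clk_shift shift = { .DENTIST_DISPCLK_WDIVIDER = 8 };
		struct clk_mask mask = { .DENTIST_DISPCLK_WDIVIDER = 0x7f00 };

		uint32_t reg = 0x2a00;
		uint32_t field = (reg & mask.DENTIST_DISPCLK_WDIVIDER)
				>> shift.DENTIST_DISPCLK_WDIVIDER;

		printf("WDIVIDER = %u\n", field); /* prints 42 */
		return 0;
	}
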
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 64dc753..c83a7f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
 	uint32_t DOMAIN5_PG_CONFIG;
 	uint32_t DOMAIN6_PG_CONFIG;
 	uint32_t DOMAIN7_PG_CONFIG;
+	uint32_t DOMAIN8_PG_CONFIG;
+	uint32_t DOMAIN9_PG_CONFIG;
+	uint32_t DOMAIN10_PG_CONFIG;
+	uint32_t DOMAIN11_PG_CONFIG;
+	uint32_t DOMAIN16_PG_CONFIG;
+	uint32_t DOMAIN17_PG_CONFIG;
+	uint32_t DOMAIN18_PG_CONFIG;
+	uint32_t DOMAIN19_PG_CONFIG;
+	uint32_t DOMAIN20_PG_CONFIG;
+	uint32_t DOMAIN21_PG_CONFIG;
 	uint32_t DOMAIN0_PG_STATUS;
 	uint32_t DOMAIN1_PG_STATUS;
 	uint32_t DOMAIN2_PG_STATUS;
@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
 	uint32_t DOMAIN5_PG_STATUS;
 	uint32_t DOMAIN6_PG_STATUS;
 	uint32_t DOMAIN7_PG_STATUS;
+	uint32_t DOMAIN8_PG_STATUS;
+	uint32_t DOMAIN9_PG_STATUS;
+	uint32_t DOMAIN10_PG_STATUS;
+	uint32_t DOMAIN11_PG_STATUS;
+	uint32_t DOMAIN16_PG_STATUS;
+	uint32_t DOMAIN17_PG_STATUS;
+	uint32_t DOMAIN18_PG_STATUS;
+	uint32_t DOMAIN19_PG_STATUS;
+	uint32_t DOMAIN20_PG_STATUS;
+	uint32_t DOMAIN21_PG_STATUS;
 	uint32_t DIO_MEM_PWR_CTRL;
 	uint32_t DCCG_GATE_DISABLE_CNTL;
 	uint32_t DCCG_GATE_DISABLE_CNTL2;
@@ -262,6 +282,8 @@ struct dce_hwseq_registers {
 	uint32_t D2VGA_CONTROL;
 	uint32_t D3VGA_CONTROL;
 	uint32_t D4VGA_CONTROL;
+	uint32_t D5VGA_CONTROL;
+	uint32_t D6VGA_CONTROL;
 	uint32_t VGA_TEST_CONTROL;
 	/* MMHUB registers. read only. temporary hack */
 	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
@@ -489,6 +511,26 @@ struct dce_hwseq_registers {
 	type DOMAIN6_POWER_GATE; \
 	type DOMAIN7_POWER_FORCEON; \
 	type DOMAIN7_POWER_GATE; \
+	type DOMAIN8_POWER_FORCEON; \
+	type DOMAIN8_POWER_GATE; \
+	type DOMAIN9_POWER_FORCEON; \
+	type DOMAIN9_POWER_GATE; \
+	type DOMAIN10_POWER_FORCEON; \
+	type DOMAIN10_POWER_GATE; \
+	type DOMAIN11_POWER_FORCEON; \
+	type DOMAIN11_POWER_GATE; \
+	type DOMAIN16_POWER_FORCEON; \
+	type DOMAIN16_POWER_GATE; \
+	type DOMAIN17_POWER_FORCEON; \
+	type DOMAIN17_POWER_GATE; \
+	type DOMAIN18_POWER_FORCEON; \
+	type DOMAIN18_POWER_GATE; \
+	type DOMAIN19_POWER_FORCEON; \
+	type DOMAIN19_POWER_GATE; \
+	type DOMAIN20_POWER_FORCEON; \
+	type DOMAIN20_POWER_GATE; \
+	type DOMAIN21_POWER_FORCEON; \
+	type DOMAIN21_POWER_GATE; \
 	type DOMAIN0_PGFSM_PWR_STATUS; \
 	type DOMAIN1_PGFSM_PWR_STATUS; \
 	type DOMAIN2_PGFSM_PWR_STATUS; \
@@ -497,6 +539,16 @@ struct dce_hwseq_registers {
 	type DOMAIN5_PGFSM_PWR_STATUS; \
 	type DOMAIN6_PGFSM_PWR_STATUS; \
 	type DOMAIN7_PGFSM_PWR_STATUS; \
+	type DOMAIN8_PGFSM_PWR_STATUS; \
+	type DOMAIN9_PGFSM_PWR_STATUS; \
+	type DOMAIN10_PGFSM_PWR_STATUS; \
+	type DOMAIN11_PGFSM_PWR_STATUS; \
+	type DOMAIN16_PGFSM_PWR_STATUS; \
+	type DOMAIN17_PGFSM_PWR_STATUS; \
+	type DOMAIN18_PGFSM_PWR_STATUS; \
+	type DOMAIN19_PGFSM_PWR_STATUS; \
+	type DOMAIN20_PGFSM_PWR_STATUS; \
+	type DOMAIN21_PGFSM_PWR_STATUS; \
 	type DCFCLK_GATE_DIS; \
 	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
 	type VGA_TEST_ENABLE; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 366bc8c..3e18ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
 		return false;
 
 	/* DCE11 HW does not support 420 */
-	if (!enc110->base.features.ycbcr420_supported &&
+	if (!enc110->base.features.hdmi_ycbcr420_supported &&
 			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		return false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c47c818..cce0d18 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank(
 	struct stream_encoder *enc)
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t retries = 0;
 	uint32_t  reg1 = 0;
 	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
 
@@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank(
 	 * (2 = start of the next vertical blank) */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
 	/* Larger delay to wait until VBLANK - use max retry of
-	* 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
-	* a little more because we may not trust delay accuracy.
-	*/
+	 * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+	 * a little more because we may not trust delay accuracy.
+	 */
 	max_retries = DP_BLANK_MAX_RETRY * 150;
 
 	/* disable DP stream */
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
 
 	/* the encoder stops sending the video stream
-	* at the start of the vertical blanking.
-	* Poll for DP_VID_STREAM_STATUS == 0
-	*/
+	 * at the start of the vertical blanking.
+	 * Poll for DP_VID_STREAM_STATUS == 0
+	 */
 
 	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
 			0,
 			10, max_retries);
 
-	ASSERT(retries <= max_retries);
-
 	/* Tell the DP encoder to ignore timing from CRTC, must be done after
-	* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
-	* complete, stream status will be stuck in video stream enabled state,
-	* i.e. DP_VID_STREAM_STATUS stuck at 1.
-	*/
+	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+	 * complete, stream status will be stuck in video stream enabled state,
+	 * i.e. DP_VID_STREAM_STATUS stuck at 1.
+	 */
 
 	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
 }
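
The stream-encoder hunk above also drops a leftover retries counter:
REG_WAIT does its own bounded polling internally, so the local variable
was never incremented and the removed ASSERT could never trip. The wait
itself is conceptually a poll loop like the sketch below, where
read_status() and the numbers stand in for the real register I/O:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Stand-in for reading DP_VID_STREAM_STATUS from the encoder. */
	static uint32_t read_status(void)
	{
		static int calls;

		return ++calls < 5; /* the stream "stops" after a few polls */
	}

	/*
	 * Poll until the field reaches the expected value or the retry
	 * budget runs out - the same shape as the driver's REG_WAIT macro.
	 */
	static bool poll_field(uint32_t expected, unsigned int delay_us,
			       unsigned int max_retries)
	{
		for (unsigned int i = 0; i < max_retries; i++) {
			if (read_status() == expected)
				return true;
			usleep(delay_us);
		}
		return false; /* timed out */
	}

	int main(void)
	{
		/* 10 us * 3000 retries = 30 ms, covering one 60 Hz frame. */
		if (!poll_field(0, 10, 3000))
			fprintf(stderr, "DP stream did not blank in time\n");
		return 0;
	}
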
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 74c05e8..8777167 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -105,73 +105,29 @@ bool dce100_enable_display_power_gating(
 		return false;
 }
 
-static void dce100_pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->avail_mclk_switch_time_us =
-						dce110_get_min_vblank_time_us(context);
-	/*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-		/ MEMORY_TYPE_MULTIPLIER;*/
-
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	uint32_t max_pix_clk = 0;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe_ctx->stream == NULL)
-			continue;
-
-		/* do not check under lay */
-		if (pipe_ctx->top_pipe)
-			continue;
-
-		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-			max_pix_clk =
-				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-	}
-	return max_pix_clk;
-}
-
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed)
+		struct dc_state *context)
 {
-	struct dc_clocks req_clks;
-
-	req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
-	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
 	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
-	dc->res_pool->dccg->funcs->update_clocks(
-			dc->res_pool->dccg,
-			&req_clks,
-			decrease_allowed);
-
-	dce100_pplib_apply_display_requirements(dc, context);
+	dc->res_pool->clk_mgr->funcs->update_clocks(
+			dc->res_pool->clk_mgr,
+			context,
+			false);
 }
 
+void dce100_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+	dc->res_pool->clk_mgr->funcs->update_clocks(
+			dc->res_pool->clk_mgr,
+			context,
+			true);
+}
 
 /**************************************************************************/
 
@@ -180,8 +136,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
 	dce110_hw_sequencer_construct(dc);
 
 	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
-	dc->hwss.set_bandwidth = dce100_set_bandwidth;
-	dc->hwss.pplib_apply_display_requirements =
-			dce100_pplib_apply_display_requirements;
+	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
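
The old set_bandwidth(dc, context, decrease_allowed) entry point is split
here into prepare_bandwidth(), called before a mode is programmed, and
optimize_bandwidth(), called afterwards, with the direction carried by the
final update_clocks() argument (safe_to_lower in the clk_mgr code). A
simplified model of that contract; the struct and helpers below are
illustrative, not the DC interface:

	#include <stdbool.h>
	#include <stdio.h>

	struct clk_state { int dispclk_khz; };

	/*
	 * Apply a requested clock. With safe_to_lower == false the clock
	 * may only rise; lowering waits until the new mode is committed.
	 */
	static void update_clocks(struct clk_state *clk, int requested_khz,
				  bool safe_to_lower)
	{
		if (requested_khz > clk->dispclk_khz ||
		    (safe_to_lower && requested_khz < clk->dispclk_khz)) {
			clk->dispclk_khz = requested_khz;
			printf("DISPCLK -> %d kHz\n", requested_khz);
		}
	}

	static void prepare_bandwidth(struct clk_state *clk, int new_khz)
	{
		update_clocks(clk, new_khz, false);	/* raise only */
	}

	static void optimize_bandwidth(struct clk_state *clk, int new_khz)
	{
		update_clocks(clk, new_khz, true);	/* lowering now safe */
	}

	int main(void)
	{
		struct clk_state clk = { .dispclk_khz = 600000 };
		int new_mode_khz = 300000;

		prepare_bandwidth(&clk, new_mode_khz);	/* kept at 600000 */
		/* ... front-end/mode programming happens here ... */
		optimize_bandwidth(&clk, new_mode_khz);	/* drops to 300000 */
		return 0;
	}
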
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index c6ec0ed..acd4185 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -33,10 +33,13 @@ struct dc_state;
 
 void dce100_hw_sequencer_construct(struct dc *dc);
 
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed);
+		struct dc_state *context);
+
+void dce100_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
 
 bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
 					struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 14754a8..6ae51a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -36,11 +36,11 @@
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
 			dce_aud_destroy(&pool->base.audios[i]);
 	}
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.abm != NULL)
 				dce_abm_destroy(&pool->base.abm);
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
 	if (at_least_one_pipe) {
 		/* TODO implement when needed but for now hardcode max value*/
 		context->bw.dce.dispclk_khz = 681000;
-		context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+		context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
 	} else {
 		context->bw.dce.dispclk_khz = 0;
 		context->bw.dce.yclk_khz = 0;
@@ -860,7 +860,6 @@ static bool construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -908,11 +907,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -938,12 +937,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
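
The same replacement repeats mechanically in every dce*_resource.c below:
create the clk_mgr, bail to the shared error label on failure, and
NULL-check before destroying. A compact sketch of the create/destroy
pairing, with the types cut down to a bare minimum; the double pointer
mirrors how dce_clk_mgr_destroy() clears the caller's handle:

	#include <stdio.h>
	#include <stdlib.h>

	struct clk_mgr { int dispclk_khz; };

	static struct clk_mgr *clk_mgr_create(void)
	{
		return calloc(1, sizeof(struct clk_mgr));
	}

	/* Double pointer so the caller's handle is cleared as well. */
	static void clk_mgr_destroy(struct clk_mgr **clk_mgr)
	{
		free(*clk_mgr);
		*clk_mgr = NULL;
	}

	struct pool { struct clk_mgr *clk_mgr; };

	static int pool_construct(struct pool *pool)
	{
		pool->clk_mgr = clk_mgr_create();
		if (pool->clk_mgr == NULL) {
			fprintf(stderr, "DC: failed to create display clock!\n");
			goto res_create_fail;
		}
		return 0;

	res_create_fail:
		if (pool->clk_mgr != NULL)
			clk_mgr_destroy(&pool->clk_mgr);
		return -1;
	}

	int main(void)
	{
		struct pool pool = { 0 };

		if (pool_construct(&pool) == 0)
			clk_mgr_destroy(&pool.clk_mgr);
		return 0;
	}
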
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 1f7f250..52d50e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
 
 static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
 
-enum fbc_idle_force {
-	/* Bit 0 - Display registers updated */
-	FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
-	/* Bit 2 - FBC_GRPH_COMP_EN register updated */
-	FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
-	/* Bit 3 - FBC_SRC_SEL register updated */
-	FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
-	/* Bit 4 - FBC_MIN_COMPRESSION register updated */
-	FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
-	/* Bit 5 - FBC_ALPHA_COMP_EN register updated */
-	FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
-	/* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
-	FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
-	/* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
-	FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
-	/* Bit 24 - Memory write to region 0 defined by MC registers. */
-	FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
-	/* Bit 25 - Memory write to region 1 defined by MC registers */
-	FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
-	/* Bit 26 - Memory write to region 2 defined by MC registers */
-	FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
-	/* Bit 27 - Memory write to region 3 defined by MC registers. */
-	FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
-	/* Bit 28 - Memory write from any client other than MCIF */
-	FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
-	/* Bit 29 - CG statics screen signal is inactive */
-	FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
 static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
 {
 	return 256 * ((pixels + 255) / 256);
 }
 
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
 {
-	uint32_t value, frame_count;
+	uint32_t value;
+	uint32_t frame_count;
+	uint32_t status_pos;
 	uint32_t retry = 0;
-	uint32_t status_pos =
-			dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+	cp110->offsets = reg_offsets[crtc_inst];
+
+	status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
 
 
 	/* Only if CRTC is enabled and counter is moving we wait for one frame. */
-	if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+	if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
 		/* Resetting LB on VBlank */
-		value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+		value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
 		set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
 		set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
-		dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+		dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
 
-		frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+		frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
 
 
 		for (retry = 10000; retry > 0; retry--) {
-			if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+			if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
 				break;
 			udelay(10);
 		}
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
 			dm_error("Frame count did not increase for 100ms.\n");
 
 		/* Resetting LB on VBlank */
-		value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+		value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
 		set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
 		set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
-		dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+		dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
 	}
-
 }
 
 static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
 		uint32_t addr;
 		uint32_t value, misc_value;
 
-
 		addr = mmFBC_CNTL;
 		value = dm_read_reg(compressor->ctx, addr);
 		set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+		/* params->inst is a valid HW CRTC instance, starting from 0 */
 		set_reg_field_value(
 			value,
 			params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
 
 		/* Keep track of enum controller_id FBC is attached to */
 		compressor->is_enabled = true;
-		compressor->attached_inst = params->inst;
-		cp110->offsets = reg_offsets[params->inst];
+		/* attached_inst is the SW CRTC instance, starting from 1;
+		 * 0 (CONTROLLER_ID_UNDEFINED) means no CRTC is attached.
+		 */
+		compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
 
 		/* Toggle it as there is bug in HW */
 		set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
 void dce110_compressor_disable_fbc(struct compressor *compressor)
 {
 	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+	uint32_t crtc_inst = 0;
 
 	if (compressor->options.bits.FBC_SUPPORT) {
-		if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+		if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
 			uint32_t reg_data;
 			/* Turn off compression */
 			reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
 			wait_for_fbc_state_changed(cp110, false);
 		}
 
-		/* Sync line buffer  - dce100/110 only*/
-		reset_lb_on_vblank(compressor->ctx);
+		/* Sync the line buffer FBC was attached to - dce100/110 only */
+		if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+			reset_lb_on_vblank(compressor,
+					crtc_inst - CONTROLLER_ID_D0);
 	}
 }
 
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
 	uint32_t compressed_surf_address_low_part =
 		compressor->compr_surface_address.addr.low_part;
 
+	cp110->offsets = reg_offsets[params->inst];
+
 	/* Clear content first. */
 	dm_write_reg(
 		compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
 	value = dm_read_reg(compressor->ctx, addr);
 	set_reg_field_value(
 		value,
-		fbc_trigger |
-		FBC_IDLE_FORCE_GRPH_COMP_EN |
-		FBC_IDLE_FORCE_SRC_SEL_CHANGE |
-		FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
-		FBC_IDLE_FORCE_ALPHA_COMP_EN |
-		FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
-		FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+		fbc_trigger,
 		FBC_IDLE_FORCE_CLEAR_MASK,
 		FBC_IDLE_FORCE_CLEAR_MASK);
 	dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
 	compressor->base.channel_interleave_size = 0;
 	compressor->base.dram_channels_num = 0;
 	compressor->base.lpt_channels_num = 0;
-	compressor->base.attached_inst = 0;
+	compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
 	compressor->base.is_enabled = false;
 	compressor->base.funcs = &dce110_compressor_funcs;
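
The compressor now records its CRTC as a SW controller id offset by
CONTROLLER_ID_D0, so that 0 (CONTROLLER_ID_UNDEFINED) can mean "not
attached", while the registers keep using the raw HW instance. A small
model of the two numbering spaces; the enum values here are illustrative:

	#include <stdio.h>

	/* SW controller ids: 0 is reserved for "not attached". */
	enum controller_id {
		CONTROLLER_ID_UNDEFINED = 0,
		CONTROLLER_ID_D0 = 1,
		CONTROLLER_ID_D1,
		CONTROLLER_ID_D2,
	};

	/* HW CRTC instances start at 0, so shift by CONTROLLER_ID_D0. */
	static enum controller_id attach(unsigned int hw_inst)
	{
		return hw_inst + CONTROLLER_ID_D0;
	}

	static int attached_hw_inst(enum controller_id id)
	{
		if (id == CONTROLLER_ID_UNDEFINED)
			return -1; /* nothing attached */
		return id - CONTROLLER_ID_D0;
	}

	int main(void)
	{
		enum controller_id inst = attach(0);	/* HW CRTC 0 */

		printf("sw id %d -> hw inst %d\n", inst, attached_hw_inst(inst));
		printf("undefined -> %d\n",
		       attached_hw_inst(CONTROLLER_ID_UNDEFINED));
		return 0;
	}
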
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index a6bcb90..6349ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 
 	regamma_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < 16 && i < 16; k++) {
+	k = 0;
+	for (i = 1; i < 16; i++) {
 		if (seg_distr[k] != -1) {
 			regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
 			regamma_params->arr_curve_points[i].offset =
 					regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)
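
The loop rewrite above makes the regamma recurrence explicit: curve point
i begins where point k = i - 1 ends, i.e. offset[i] = offset[k] +
2^seg_distr[k]. A standalone sketch of that accumulation; the seg_distr
values are invented, with -1 marking unused regions as in the driver:

	#include <stdio.h>

	#define NUM_REGIONS 16

	int main(void)
	{
		/* 2^n segments per region; -1 marks an unused region. */
		int seg_distr[NUM_REGIONS] = { 3, 4, 4, 5 };
		int offset[NUM_REGIONS] = { 0 };
		int i, k;

		for (i = 4; i < NUM_REGIONS; i++)
			seg_distr[i] = -1;

		/* Point i begins where point k = i - 1 ends. */
		k = 0;
		for (i = 1; i < NUM_REGIONS; i++) {
			if (seg_distr[k] != -1)
				offset[i] = offset[k] + (1 << seg_distr[k]);
			k++;
		}

		for (i = 0; i < 5; i++)
			printf("point %d offset %d\n", i, offset[i]);
		/* prints 0, 8, 24, 40, 72 */
		return 0;
	}
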
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
 		link->dc->hwss.edp_backlight_control(link, true);
-		stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
 	}
 }
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1192,8 +1191,8 @@ static void build_audio_output(
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
 			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 		audio_output->pll_info.dp_dto_source_clock_in_khz =
-				state->dis_clk->funcs->get_dp_ref_clk_frequency(
-						state->dis_clk);
+				state->dccg->funcs->get_dp_ref_clk_frequency(
+						state->dccg);
 	}
 
 	audio_output->pll_info.feed_back_divider =
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 	int i;
 	struct dc_link *edp_link_to_turnoff = NULL;
 	struct dc_link *edp_link = get_link_for_edp(dc);
+	struct dc_bios *bios = dc->ctx->dc_bios;
 	bool can_edp_fast_boot_optimize = false;
 	bool apply_edp_fast_boot_optimization = false;
 
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 			if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 				context->streams[i]->apply_edp_fast_boot_optimization = true;
 				apply_edp_fast_boot_optimization = true;
+
+				/* After resume from S4/S5, the VBIOS may have posted eDP, so the
+				 * previous dpms_off state no longer makes sense.
+				 * Update dpms_off to align HW and SW state by checking the
+				 * VBIOS scratch register.
+				 */
+				if (bios->funcs->is_active_display)	{
+					const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
+
+					if (bios->funcs->is_active_display(bios,
+							context->streams[i]->signal,
+							device_tag))
+						context->streams[i]->dpms_off = false;
+				}
 			}
 		}
 	}
@@ -1748,44 +1762,17 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
 }
 
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	uint32_t max_pix_clk = 0;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe_ctx->stream == NULL)
-			continue;
-
-		/* do not check under lay */
-		if (pipe_ctx->top_pipe)
-			continue;
-
-		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
-			max_pix_clk =
-				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
-	}
-
-	return max_pix_clk;
-}
-
 /*
  *  Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
-			      struct dc_state *context,
-			      uint32_t *pipe_idx)
+		struct dc_state *context,
+		uint32_t *pipe_idx)
 {
 	uint32_t i;
 	struct pipe_ctx *pipe_ctx = NULL;
 	struct resource_context *res_ctx = &context->res_ctx;
+	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
 
 
 	ASSERT(dc->fbc_compressor);
@@ -1800,14 +1787,28 @@ static bool should_enable_fbc(struct dc *dc,
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (res_ctx->pipe_ctx[i].stream) {
+
 			pipe_ctx = &res_ctx->pipe_ctx[i];
-			*pipe_idx = i;
-			break;
+
+			if (!pipe_ctx)
+				continue;
+
+			/* fbc not applicable on underlay pipe */
+			if (pipe_ctx->pipe_idx != underlay_idx) {
+				*pipe_idx = i;
+				break;
+			}
 		}
 	}
 
-	/* Pipe context should be found */
-	ASSERT(pipe_ctx);
+	if (i == dc->res_pool->pipe_count)
+		return false;
+
+	if (!pipe_ctx->stream->sink)
+		return false;
+
+	if (!pipe_ctx->stream->sink->link)
+		return false;
 
 	/* Only supports eDP */
 	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1831,8 +1832,9 @@ static bool should_enable_fbc(struct dc *dc,
 /*
  *  Enable FBC
  */
-static void enable_fbc(struct dc *dc,
-		       struct dc_state *context)
+static void enable_fbc(
+		struct dc *dc,
+		struct dc_state *context)
 {
 	uint32_t pipe_idx = 0;
 
@@ -1842,10 +1844,9 @@ static void enable_fbc(struct dc *dc,
 		struct compressor *compr = dc->fbc_compressor;
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
 
-
 		params.source_view_width = pipe_ctx->stream->timing.h_addressable;
 		params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+		params.inst = pipe_ctx->stream_res.tg->inst;
 		compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
 
 		compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2060,10 +2061,10 @@ enum dc_status dce110_apply_ctx_to_hw(
 			return status;
 	}
 
-	dcb->funcs->set_scratch_critical_state(dcb, false);
-
 	if (dc->fbc_compressor)
-		enable_fbc(dc, context);
+		enable_fbc(dc, dc->current_state);
+
+	dcb->funcs->set_scratch_critical_state(dcb, false);
 
 	return DC_OK;
 }
@@ -2296,7 +2297,7 @@ static void dce110_enable_per_frame_crtc_position_reset(
 	int i;
 
 	gsl_params.gsl_group = 0;
-	gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+	gsl_params.gsl_master = 0;
 
 	for (i = 0; i < group_size; i++)
 		grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
@@ -2385,193 +2386,33 @@ static void init_hw(struct dc *dc)
 
 }
 
-void dce110_fill_display_configs(
-	const struct dc_state *context,
-	struct dm_pp_display_configuration *pp_display_cfg)
-{
-	int j;
-	int num_cfgs = 0;
 
-	for (j = 0; j < context->stream_count; j++) {
-		int k;
-
-		const struct dc_stream_state *stream = context->streams[j];
-		struct dm_pp_single_disp_config *cfg =
-			&pp_display_cfg->disp_configs[num_cfgs];
-		const struct pipe_ctx *pipe_ctx = NULL;
-
-		for (k = 0; k < MAX_PIPES; k++)
-			if (stream == context->res_ctx.pipe_ctx[k].stream) {
-				pipe_ctx = &context->res_ctx.pipe_ctx[k];
-				break;
-			}
-
-		ASSERT(pipe_ctx != NULL);
-
-		/* only notify active stream */
-		if (stream->dpms_off)
-			continue;
-
-		num_cfgs++;
-		cfg->signal = pipe_ctx->stream->signal;
-		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
-		cfg->src_height = stream->src.height;
-		cfg->src_width = stream->src.width;
-		cfg->ddi_channel_mapping =
-			stream->sink->link->ddi_channel_mapping.raw;
-		cfg->transmitter =
-			stream->sink->link->link_enc->transmitter;
-		cfg->link_settings.lane_count =
-			stream->sink->link->cur_link_settings.lane_count;
-		cfg->link_settings.link_rate =
-			stream->sink->link->cur_link_settings.link_rate;
-		cfg->link_settings.link_spread =
-			stream->sink->link->cur_link_settings.link_spread;
-		cfg->sym_clock = stream->phy_pix_clk;
-		/* Round v_refresh*/
-		cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
-		cfg->v_refresh /= stream->timing.h_total;
-		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
-							/ stream->timing.v_total;
-	}
-
-	pp_display_cfg->display_count = num_cfgs;
-}
-
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
-{
-	uint8_t j;
-	uint32_t min_vertical_blank_time = -1;
-
-	for (j = 0; j < context->stream_count; j++) {
-		struct dc_stream_state *stream = context->streams[j];
-		uint32_t vertical_blank_in_pixels = 0;
-		uint32_t vertical_blank_time = 0;
-
-		vertical_blank_in_pixels = stream->timing.h_total *
-			(stream->timing.v_total
-			 - stream->timing.v_addressable);
-
-		vertical_blank_time = vertical_blank_in_pixels
-			* 1000 / stream->timing.pix_clk_khz;
-
-		if (min_vertical_blank_time > vertical_blank_time)
-			min_vertical_blank_time = vertical_blank_time;
-	}
-
-	return min_vertical_blank_time;
-}
-
-static int determine_sclk_from_bounding_box(
-		const struct dc *dc,
-		int required_sclk)
-{
-	int i;
-
-	/*
-	 * Some asics do not give us sclk levels, so we just report the actual
-	 * required sclk
-	 */
-	if (dc->sclk_lvls.num_levels == 0)
-		return required_sclk;
-
-	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
-		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
-			return dc->sclk_lvls.clocks_in_khz[i];
-	}
-	/*
-	 * even maximum level could not satisfy requirement, this
-	 * is unexpected at this stage, should have been caught at
-	 * validation time
-	 */
-	ASSERT(0);
-	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
-}
-
-static void pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->all_displays_in_sync =
-		context->bw.dce.all_displays_in_sync;
-	pp_display_cfg->nb_pstate_switch_disable =
-			context->bw.dce.nbp_state_change_enable == false;
-	pp_display_cfg->cpu_cc6_disable =
-			context->bw.dce.cpuc_state_change_enable == false;
-	pp_display_cfg->cpu_pstate_disable =
-			context->bw.dce.cpup_state_change_enable == false;
-	pp_display_cfg->cpu_pstate_separation_time =
-			context->bw.dce.blackout_recovery_time_us;
-
-	pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
-		/ MEMORY_TYPE_MULTIPLIER;
-
-	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
-			dc,
-			context->bw.dce.sclk_khz);
-
-	pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
-
-	pp_display_cfg->min_engine_clock_deep_sleep_khz
-			= context->bw.dce.sclk_deep_sleep_khz;
-
-	pp_display_cfg->avail_mclk_switch_time_us =
-						dce110_get_min_vblank_time_us(context);
-	/* TODO: dce11.2*/
-	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
-	pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	/* TODO: is this still applicable?*/
-	if (pp_display_cfg->display_count == 1) {
-		const struct dc_crtc_timing *timing =
-			&context->streams[0]->timing;
-
-		pp_display_cfg->crtc_index =
-			pp_display_cfg->disp_configs[0].pipe_idx;
-		pp_display_cfg->line_time_in_us = timing->h_total * 1000
-							/ timing->pix_clk_khz;
-	}
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-static void dce110_set_bandwidth(
+void dce110_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed)
+		struct dc_state *context)
 {
-	struct dc_clocks req_clks;
-	struct dccg *dccg = dc->res_pool->dccg;
+	struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 
-	req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
-	req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
-	if (decrease_allowed)
-		dce110_set_displaymarks(dc, context);
-	else
-		dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-	if (dccg->funcs->update_dfs_bypass)
-		dccg->funcs->update_dfs_bypass(
-			dccg,
-			dc,
-			context,
-			req_clks.dispclk_khz);
+	dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
 	dccg->funcs->update_clocks(
 			dccg,
-			&req_clks,
-			decrease_allowed);
-	pplib_apply_display_requirements(dc, context);
+			context,
+			false);
+}
+
+void dce110_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+	dce110_set_displaymarks(dc, context);
+
+	dccg->funcs->update_clocks(
+			dccg,
+			context,
+			true);
 }
 
 static void dce110_program_front_end_for_pipe(
@@ -2582,7 +2423,6 @@ static void dce110_program_front_end_for_pipe(
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct xfm_grph_csc_adjustment adjust;
 	struct out_csc_color_matrix tbl_entry;
-	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
 	unsigned int i;
 	DC_LOGGER_INIT();
 	memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2623,15 +2463,6 @@ static void dce110_program_front_end_for_pipe(
 
 	program_scaler(dc, pipe_ctx);
 
-	/* fbc not applicable on Underlay pipe */
-	if (dc->fbc_compressor && old_pipe->stream &&
-	    pipe_ctx->pipe_idx != underlay_idx) {
-		if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
-			dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-		else
-			enable_fbc(dc, dc->current_state);
-	}
-
 	mi->funcs->mem_input_program_surface_config(
 			mi,
 			plane_state->format,
@@ -2708,6 +2539,9 @@ static void dce110_apply_ctx_for_surface(
 	if (num_planes == 0)
 		return;
 
+	if (dc->fbc_compressor)
+		dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2750,6 +2584,9 @@ static void dce110_apply_ctx_for_surface(
 			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
 	}
+
+	if (dc->fbc_compressor)
+		enable_fbc(dc, dc->current_state);
 }
 
 static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -2776,28 +2613,6 @@ static void dce110_wait_for_mpcc_disconnect(
 	/* do nothing*/
 }
 
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
-		enum dc_color_space colorspace,
-		uint16_t *matrix)
-{
-	int i;
-	struct out_csc_color_matrix tbl_entry;
-
-	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
-				== true) {
-			enum dc_color_space color_space =
-				pipe_ctx->stream->output_color_space;
-
-			//uint16_t matrix[12];
-			for (i = 0; i < 12; i++)
-				tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
-			tbl_entry.color_space = color_space;
-			//tbl_entry.regval = matrix;
-			pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
-	}
-}
-
 void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2846,13 +2661,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 				pipe_ctx->plane_res.xfm, attributes);
 }
 
-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
-
-static void optimize_shared_resources(struct dc *dc) {}
-
 static const struct hw_sequencer_funcs dce110_funcs = {
 	.program_gamut_remap = program_gamut_remap,
-	.program_csc_matrix = program_csc_matrix,
 	.init_hw = init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
@@ -2875,7 +2685,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.enable_display_power_gating = dce110_enable_display_power_gating,
 	.disable_plane = dce110_power_down_fe,
 	.pipe_control_lock = dce_pipe_control_lock,
-	.set_bandwidth = dce110_set_bandwidth,
+	.prepare_bandwidth = dce110_prepare_bandwidth,
+	.optimize_bandwidth = dce110_optimize_bandwidth,
 	.set_drr = set_drr,
 	.get_position = get_position,
 	.set_static_screen_control = set_static_screen_control,
@@ -2884,9 +2695,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.setup_stereo = NULL,
 	.set_avmute = dce110_set_avmute,
 	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
-	.ready_shared_resources = ready_shared_resources,
-	.optimize_shared_resources = optimize_shared_resources,
-	.pplib_apply_display_requirements = pplib_apply_display_requirements,
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3db..cd3e36d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
 		struct dc_state *context);
 
 
-
 void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
 
 void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool);
 
-void dce110_fill_display_configs(
-	const struct dc_state *context,
-	struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_prepare_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
 
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+void dce110_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c9fd90..e33d117 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "dce110/dce110_resource.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "include/irq_service_interface.h"
 #include "dce/dce_audio.h"
 #include "dce110/dce110_timing_generator.h"
@@ -45,7 +46,6 @@
 #include "dce110/dce110_transform_v.h"
 #include "dce/dce_opp.h"
 #include "dce110/dce110_opp_v.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
 #define SRI(reg_name, block, id)\
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 			&clks);
 
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+		clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+		clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+		clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 }
 
@@ -1201,7 +1201,6 @@ static bool construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1257,11 +1256,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce110_dccg_create(ctx,
+	pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1287,13 +1286,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-				static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 3ce79c2..969d4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -35,6 +35,7 @@
 
 #include "irq/dce110/irq_service_dce110.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_link_encoder.h"
@@ -42,7 +43,6 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_ipp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 
 #include "dce/dce_hwseq.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 				&clks);
 
 		dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+			clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 		dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+			clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
 			1000);
 		dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-			clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+			clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
 			1000);
 
 		return;
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	 * YCLK = UMACLK*m_memoryTypeMultiplier
 	 */
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1131,7 +1132,6 @@ static bool construct(
 {
 	unsigned int i;
 	struct dc_context *ctx = dc->ctx;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1199,11 +1199,11 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce112_dccg_create(ctx,
+	pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1229,13 +1229,6 @@ static bool construct(
 		goto res_create_fail;
 	}
 
-	/* get static clock information for PPLIB or firmware, save
-	 * max_clock_state
-	 */
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-				static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
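
The MEMORY_TYPE_MULTIPLIER -> MEMORY_TYPE_MULTIPLIER_CZ renames across
these resource files make explicit that the YCLK = UMACLK *
m_memoryTypeMultiplier conversion hard-codes the Carrizo memory-type
factor. A toy version of what the bw_frc_to_fixed() calls compute; the
plain fraction stands in for the driver's bw_fixed type, and the factor
of 4 is assumed from the driver's define:

	#include <stdint.h>
	#include <stdio.h>

	#define MEMORY_TYPE_MULTIPLIER_CZ 4	/* assumed Carrizo factor */

	/* Toy stand-in for bw_frc_to_fixed(): value = num / den. */
	struct frac { int64_t num, den; };

	static struct frac frc_to_fixed(int64_t num, int64_t den)
	{
		return (struct frac){ num, den };
	}

	int main(void)
	{
		int64_t umaclk_khz = 800000;	/* illustrative UMACLK level */

		/* YCLK = UMACLK * m_memoryTypeMultiplier, in MHz here. */
		struct frac yclk = frc_to_fixed(
			umaclk_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);

		printf("yclk = %lld MHz\n", (long long)(yclk.num / yclk.den));
		return 0;
	}
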
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 79ab5f9..f126966 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -31,6 +31,7 @@
 #include "resource.h"
 #include "include/irq_service_interface.h"
 #include "dce120_resource.h"
+
 #include "dce112/dce112_resource.h"
 
 #include "dce110/dce110_resource.h"
@@ -39,7 +40,6 @@
 #include "irq/dce120/irq_service_dce120.h"
 #include "dce/dce_opp.h"
 #include "dce/dce_clock_source.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_mem_input.h"
 
@@ -47,6 +47,7 @@
 #include "dce120/dce120_hw_sequencer.h"
 #include "dce/dce_transform.h"
 
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 }
 
 static void read_dce_straps(
@@ -606,7 +607,8 @@ static struct audio *create_audio(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	 * YCLK = UMACLK*m_memoryTypeMultiplier
 	 */
 	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+		mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
 	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
 		1000);
 
 	/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -973,8 +975,8 @@ static bool construct(
 		}
 	}
 
-	pool->base.dccg = dce120_dccg_create(ctx);
-	if (pool->base.dccg == NULL) {
+	pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto dccg_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 6c6a1a1..a60a90e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
 
 	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
 	dc->hwss.pipe_control_lock = dce_pipe_control_lock;
-	dc->hwss.set_bandwidth = dce100_set_bandwidth;
+	dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+	dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index d68f951..cdd1d6b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -37,14 +37,13 @@
 #include "dce110/dce110_timing_generator.h"
 #include "dce110/dce110_resource.h"
 #include "dce80/dce80_timing_generator.h"
+#include "dce/dce_clk_mgr.h"
 #include "dce/dce_mem_input.h"
 #include "dce/dce_link_encoder.h"
 #include "dce/dce_stream_encoder.h"
-#include "dce/dce_mem_input.h"
 #include "dce/dce_ipp.h"
 #include "dce/dce_transform.h"
 #include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
@@ -155,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
 	.reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
 		CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
 		CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -779,8 +778,8 @@ static void destruct(struct dce110_resource_pool *pool)
 		}
 	}
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	if (pool->base.irqs != NULL) {
 		dal_irq_service_destroy(&pool->base.irqs);
@@ -793,7 +792,7 @@ bool dce80_validate_bandwidth(
 {
 	/* TODO implement when needed but for now hardcode max value*/
 	context->bw.dce.dispclk_khz = 681000;
-	context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+	context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
 
 	return true;
 }
@@ -855,7 +854,6 @@ static bool dce80_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -918,11 +916,11 @@ static bool dce80_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -948,10 +946,6 @@ static bool dce80_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
@@ -1065,7 +1059,6 @@ static bool dce81_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1128,11 +1121,11 @@ static bool dce81_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1158,10 +1151,6 @@ static bool dce81_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
@@ -1275,7 +1264,6 @@ static bool dce83_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct dc_firmware_info info;
 	struct dc_bios *bp;
-	struct dm_pp_static_clock_info static_clk_info = {0};
 
 	ctx->dc_bios->regs = &bios_regs;
 
@@ -1334,11 +1322,11 @@ static bool dce83_construct(
 		}
 	}
 
-	pool->base.dccg = dce_dccg_create(ctx,
+	pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 			&disp_clk_regs,
 			&disp_clk_shift,
 			&disp_clk_mask);
-	if (pool->base.dccg == NULL) {
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto res_create_fail;
@@ -1364,10 +1352,6 @@ static bool dce83_construct(
 		goto res_create_fail;
 	}
 
-	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-		pool->base.dccg->max_clks_state =
-					static_clk_info.max_clocks_state;
-
 	{
 		struct irq_service_init_data init_data;
 		init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872..55f293c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,7 +24,7 @@
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
 		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
-		dcn10_hubp.o dcn10_mpc.o \
+		dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
 		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
 		dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
new file mode 100644
index 0000000..54abedb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+	container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+	(clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+	clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+	clk_mgr->ctx->logger
+
+void dcn1_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context)
+{
+	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+	pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+	pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
+	pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+	pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+	pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+	dce110_fill_display_configs(context, pp_display_cfg);
+
+	dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+	bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+	bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+
+	/* increasing clock: current div is 0, requested div is 1 */
+	if (dispclk_increase) {
+		/* already divided by 2, no need to reach target clk with 2 steps */
+		if (cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* requested disp clk is lower than the maximum supported dpp clk,
+		 * no need to reach target clk in two steps.
+		 */
+		if (new_clocks->dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* target dpp clk is not requested to be divided by 2, still within threshold */
+		if (!request_dpp_div)
+			return new_clocks->dispclk_khz;
+
+	} else {
+		/* decreasing clock: current dppclk is divided by 2,
+		 * requested dppclk is not divided by 2.
+		 */
+
+		/* current dpp clk is not divided by 2, no need to ramp */
+		if (!cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* current disp clk is lower than current maximum dpp clk,
+		 * no need to ramp
+		 */
+		if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* requested dpp clk needs to be divided by 2 */
+		if (request_dpp_div)
+			return new_clocks->dispclk_khz;
+	}
+
+	return disp_clk_threshold;
+}
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+	struct dc *dc = clk_mgr->ctx->dc;
+	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+	int i;
+
+	/* set disp clk to dpp clk threshold */
+	dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+	/* update request dpp clk division option */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->plane_state)
+			continue;
+
+		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+				pipe_ctx->plane_res.dpp,
+				request_dpp_div,
+				true);
+	}
+
+	/* If the target clk is not the same as the dppclk threshold, set it to the target clock */
+	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+		dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
+
+	clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+	clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+	clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static int get_active_display_cnt(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i, display_count;
+
+	display_count = 0;
+	for (i = 0; i < context->stream_count; i++) {
+		const struct dc_stream_state *stream = context->streams[i];
+
+		/*
+		 * Only notify active stream or virtual stream.
+		 * Need to notify virtual stream to work around
+		 * headless case. HPD does not fire when system is in
+		 * S0i2.
+		 */
+		if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+			display_count++;
+	}
+
+	return display_count;
+}
+
+static void notify_deep_sleep_dcfclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+{
+	int min_dcef_deep_sleep_clk_mhz; // minimum required DCEF Deep Sleep clock in MHz
+	/*
+	 * if function pointer not set up, this message is
+	 * sent as part of pplib_apply_display_requirements.
+	 * So just return.
+	 */
+	if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+		return;
+
+	min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+	pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+}
+
+static void notify_hard_min_dcfclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+{
+	int min_dcf_clk_mhz; // minimum required DCF clock in MHz
+
+	/*
+	 * if function pointer not set up, this message is
+	 * sent as part of pplib_apply_display_requirements.
+	 * So just return.
+	 */
+	if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+		return;
+
+	min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+
+	pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+}
+
+static void notify_hard_min_fclk_to_smu(
+		struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
+{
+	int min_f_clk_mhz; // minimum required F clock in MHz
+
+	/*
+	 * if function pointer not set up, this message is
+	 * sent as part of pplib_apply_display_requirements.
+	 * So just return.
+	 */
+	if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
+		return;
+
+	min_f_clk_mhz = min_f_clk_khz / 1000;
+
+	pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
+}
+
+static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
+			bool safe_to_lower)
+{
+	struct dc *dc = clk_mgr->ctx->dc;
+	struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+	struct pp_smu_display_requirement_rv *smu_req_cur =
+			&dc->res_pool->pp_smu_req;
+	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+	uint32_t requested_dcf_clock_in_khz = 0;
+	bool send_request_to_increase = false;
+	bool send_request_to_lower = false;
+	int display_count;
+
+	bool enter_display_off = false;
+
+	display_count = get_active_display_cnt(dc, context);
+
+	if (display_count == 0)
+		enter_display_off = true;
+
+	if (enter_display_off == safe_to_lower) {
+		/*
+		 * Notify SMU of the number of active displays.
+		 * If the function pointer is not set up, this message is
+		 * sent as part of pplib_apply_display_requirements.
+		 */
+		if (pp_smu->set_display_count)
+			pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+		else
+			smu_req.display_count = display_count;
+
+	}
+
+	if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
+			|| new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
+			|| new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
+			|| new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+		send_request_to_increase = true;
+
+	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+		clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+		send_request_to_lower = true;
+	}
+
+	// F Clock
+	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
+		clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+		smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
+
+		notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
+
+		send_request_to_lower = true;
+	}
+
+	//DCF Clock
+	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+		clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+		smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
+
+		send_request_to_lower = true;
+	}
+
+	if (should_set_clock(safe_to_lower,
+			new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+		clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+
+		send_request_to_lower = true;
+	}
+
+	/* make sure dcfclk is raised before dppclk so there is
+	 * enough voltage to run dppclk
+	 */
+	if (send_request_to_increase) {
+		/*use dcfclk to request voltage*/
+		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+		if (pp_smu->set_display_requirement)
+			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+		dcn1_pplib_apply_display_requirements(dc, context);
+	}
+
+	/* dcn1 dppclk is tied to dispclk */
+	/* program dispclk even when unchanged (==) as a w/a for sleep resume clock ramping issues */
+	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
+			|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
+		dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
+		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+		send_request_to_lower = true;
+	}
+
+	if (!send_request_to_increase && send_request_to_lower) {
+		/*use dcfclk to request voltage*/
+		requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+		notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+		if (pp_smu->set_display_requirement)
+			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+		notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+		dcn1_pplib_apply_display_requirements(dc, context);
+	}
+
+	*smu_req_cur = smu_req;
+}
+
+static const struct clk_mgr_funcs dcn1_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = dcn1_update_clocks
+};
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+{
+	struct dc_debug_options *debug = &ctx->dc->debug;
+	struct dc_bios *bp = ctx->dc_bios;
+	struct dc_firmware_info fw_info = { { 0 } };
+	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+	if (clk_mgr_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	clk_mgr_dce->base.ctx = ctx;
+	clk_mgr_dce->base.funcs = &dcn1_funcs;
+
+	clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+	clk_mgr_dce->dprefclk_ss_percentage = 0;
+	clk_mgr_dce->dprefclk_ss_divider = 1000;
+	clk_mgr_dce->ss_on_dprefclk = false;
+
+	clk_mgr_dce->dprefclk_khz = 600000;
+	if (bp->integrated_info)
+		clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr_dce->dfs_bypass_enabled = true;
+
+	dce_clock_read_ss_info(clk_mgr_dce);
+
+	return &clk_mgr_dce->base;
+}
+
+
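
The two-step dispclk ramp in dcn1_determine_dppclk_threshold() is easier to follow with numbers. Below is a standalone restatement of the same decision rule (the kernel function is static, so this mirrors it rather than calling it); the values in the trailing comment are made up:

#include <stdbool.h>

/* Mirrors dcn1_determine_dppclk_threshold(): returns the clock to program
 * first. When a divider flip is needed mid-ramp, the first step stops at
 * the maximum supported dppclk so the DPPs never overclock. */
static int dppclk_threshold(int cur_disp, int cur_dpp,
			    int new_disp, int new_dpp, int max_dpp)
{
	bool request_div = new_disp > new_dpp; /* want dppclk = dispclk / 2 */
	bool cur_div = cur_disp > cur_dpp;     /* already running divided   */

	if (new_disp > cur_disp) {             /* increasing dispclk */
		if (cur_div || new_disp <= max_dpp || !request_div)
			return new_disp;       /* one step is enough */
	} else {                               /* decreasing dispclk */
		if (!cur_div || cur_disp <= max_dpp || request_div)
			return new_disp;
	}
	return max_dpp;                        /* intermediate step */
}

/* e.g. dppclk_threshold(400000, 400000, 800000, 400000, 600000) == 600000:
 * ramp to 600 MHz, flip the DPP divider, then finish the ramp to 800 MHz,
 * which is exactly what dcn1_ramp_up_dispclk_with_dpp() then does. */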
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
new file mode 100644
index 0000000..a995eda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN10_CLK_MGR_H__
+#define __DCN10_CLK_MGR_H__
+
+#include "../dce/dce_clk_mgr.h"
+
+struct clk_bypass {
+	uint32_t dcfclk_bypass;
+	uint32_t dispclk_bypass;
+	uint32_t dprefclk_bypass;
+};
+
+void dcn1_pplib_apply_display_requirements(
+	struct dc *dc,
+	struct dc_state *context);
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
+
+#endif //__DCN10_CLK_MGR_H__
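
One detail worth calling out in dcn1_clk_mgr_create() above: the DENTIST VCO frequency is taken from the BIOS integrated info, then from the firmware info, then falls back to a hardcoded default. A standalone restatement of that fallback chain (values in kHz, 0 meaning "not provided"):

/* Fallback chain for the DENTIST VCO frequency, as in dcn1_clk_mgr_create():
 * integrated info first, firmware info second, 3.6 GHz as a last resort. */
static int pick_dentist_vco_khz(int integrated_info_khz, int fw_info_khz)
{
	if (integrated_info_khz != 0)
		return integrated_info_khz;
	if (fw_info_khz != 0)
		return fw_info_khz;
	return 3600000; /* hardcoded default, 3.6 GHz */
}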
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 5d95a99..7469333 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func(
 	unsigned int i = 0;
 
 	REG_SET_2(reg->start_cntl_b, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].blue.custom_float_x,
 			exp_resion_start_segment, 0);
 	REG_SET_2(reg->start_cntl_g, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].green.custom_float_x,
 			exp_resion_start_segment, 0);
 	REG_SET_2(reg->start_cntl_r, 0,
-			exp_region_start, params->arr_points[0].custom_float_x,
+			exp_region_start, params->corner_points[0].red.custom_float_x,
 			exp_resion_start_segment, 0);
 
 	REG_SET(reg->start_slope_cntl_b, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
 	REG_SET(reg->start_slope_cntl_g, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
 	REG_SET(reg->start_slope_cntl_r, 0,
-			field_region_linear_slope, params->arr_points[0].custom_float_slope);
+			field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
 
 	REG_SET(reg->start_end_cntl1_b, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].blue.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_b, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-			field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+			field_region_end_base, params->corner_points[1].blue.custom_float_y);
 
 	REG_SET(reg->start_end_cntl1_g, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].green.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_g, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-		field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+		field_region_end_base, params->corner_points[1].green.custom_float_y);
 
 	REG_SET(reg->start_end_cntl1_r, 0,
-			field_region_end, params->arr_points[1].custom_float_x);
+			field_region_end, params->corner_points[1].red.custom_float_x);
 	REG_SET_2(reg->start_end_cntl2_r, 0,
-			field_region_end_slope, params->arr_points[1].custom_float_slope,
-		field_region_end_base, params->arr_points[1].custom_float_y);
+			field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+		field_region_end_base, params->corner_points[1].red.custom_float_y);
 
 	for (reg_region_cur = reg->region_start;
 			reg_region_cur <= reg->region_end;
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
 		struct pwl_result_data *rgb_resulted,
-		struct curve_points *arr_points,
+		struct curve_points3 *corner_points,
 		uint32_t hw_points_num,
 		bool fixpoint)
 {
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float(
 	fmt.mantissa_bits = 12;
 	fmt.sign = false;
 
-	if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
-					    &arr_points[0].custom_float_x)) {
+	/* corner_points[0] - beginning base, slope offset for R,G,B
+	 * corner_points[1] - end base, slope offset for R,G,B
+	 */
+	if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+				&corner_points[0].red.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+				&corner_points[0].green.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+				&corner_points[0].blue.custom_float_x)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
-					    &arr_points[0].custom_float_offset)) {
+	if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+				&corner_points[0].red.custom_float_offset)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+				&corner_points[0].green.custom_float_offset)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+				&corner_points[0].blue.custom_float_offset)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
-					    &arr_points[0].custom_float_slope)) {
+	if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+				&corner_points[0].red.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+				&corner_points[0].green.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+				&corner_points[0].blue.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float(
 	fmt.mantissa_bits = 10;
 	fmt.sign = false;
 
-	if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
-					    &arr_points[1].custom_float_x)) {
+	if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+				&corner_points[1].red.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+				&corner_points[1].green.custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+				&corner_points[1].blue.custom_float_x)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
 
-	if (fixpoint == true)
-		arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
-	else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
-		&arr_points[1].custom_float_y)) {
+	if (fixpoint == true) {
+		corner_points[1].red.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+		corner_points[1].green.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+		corner_points[1].blue.custom_float_y =
+				dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+	} else {
+		if (!convert_to_custom_float_format(corner_points[1].red.y,
+				&fmt, &corner_points[1].red.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+		if (!convert_to_custom_float_format(corner_points[1].green.y,
+				&fmt, &corner_points[1].green.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+		if (!convert_to_custom_float_format(corner_points[1].blue.y,
+				&fmt, &corner_points[1].blue.custom_float_y)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+	}
+
+	if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+				&corner_points[1].red.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
-
-	if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
-					    &arr_points[1].custom_float_slope)) {
+	if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+				&corner_points[1].green.custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+	if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+				&corner_points[1].blue.custom_float_slope)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format(
 				const struct dc_transfer_func *output_tf,
 				struct pwl_params *lut_params, bool fixpoint)
 {
-	struct curve_points *arr_points;
+	struct curve_points3 *corner_points;
 	struct pwl_result_data *rgb_resulted;
 	struct pwl_result_data *rgb;
 	struct pwl_result_data *rgb_plus_1;
-	struct fixed31_32 y_r;
-	struct fixed31_32 y_g;
-	struct fixed31_32 y_b;
-	struct fixed31_32 y1_min;
-	struct fixed31_32 y3_max;
 
 	int32_t region_start, region_end;
 	int32_t i;
@@ -259,16 +324,16 @@ bool cm_helper_translate_curve_to_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE();
+	PERF_TRACE_CTX(output_tf->ctx);
 
-	arr_points = lut_params->arr_points;
+	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
 
 	memset(lut_params, 0, sizeof(struct pwl_params));
 	memset(seg_distr, 0, sizeof(seg_distr));
 
-	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+	if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
 		/* 32 segments
 		 * segments are from 2^-25 to 2^7
 		 */
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	// All 3 color channels have the same x
+	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_start));
-	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].green.x = corner_points[0].red.x;
+	corner_points[0].blue.x = corner_points[0].red.x;
+
+	corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_end));
+	corner_points[1].green.x = corner_points[1].red.x;
+	corner_points[1].blue.x = corner_points[1].red.x;
 
-	y_r = rgb_resulted[0].red;
-	y_g = rgb_resulted[0].green;
-	y_b = rgb_resulted[0].blue;
+	corner_points[0].red.y = rgb_resulted[0].red;
+	corner_points[0].green.y = rgb_resulted[0].green;
+	corner_points[0].blue.y = rgb_resulted[0].blue;
 
-	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
-	arr_points[0].y = y1_min;
-	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-	y_r = rgb_resulted[hw_points - 1].red;
-	y_g = rgb_resulted[hw_points - 1].green;
-	y_b = rgb_resulted[hw_points - 1].blue;
+	corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+			corner_points[0].red.x);
+	corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+			corner_points[0].green.x);
+	corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+			corner_points[0].blue.x);
 
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-	arr_points[1].y = y3_max;
-
-	arr_points[1].slope = dc_fixpt_zero;
+	corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+	corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+	corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+	corner_points[1].red.slope = dc_fixpt_zero;
+	corner_points[1].green.slope = dc_fixpt_zero;
+	corner_points[1].blue.slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format(
 		const struct fixed31_32 end_value =
 				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dc_fixpt_div(
-			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-			dc_fixpt_sub(end_value, arr_points[1].x));
+		corner_points[1].red.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+			dc_fixpt_sub(end_value, corner_points[1].red.x));
+		corner_points[1].green.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+			dc_fixpt_sub(end_value, corner_points[1].green.x));
+		corner_points[1].blue.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+			dc_fixpt_sub(end_value, corner_points[1].blue.x));
 	}
 
 	lut_params->hw_points_num = hw_points;
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format(
 		++i;
 	}
 	cm_helper_convert_to_custom_float(rgb_resulted,
-						lut_params->arr_points,
+						lut_params->corner_points,
 						hw_points, fixpoint);
 
 	return true;
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 				const struct dc_transfer_func *output_tf,
 				struct pwl_params *lut_params)
 {
-	struct curve_points *arr_points;
+	struct curve_points3 *corner_points;
 	struct pwl_result_data *rgb_resulted;
 	struct pwl_result_data *rgb;
 	struct pwl_result_data *rgb_plus_1;
-	struct fixed31_32 y_r;
-	struct fixed31_32 y_g;
-	struct fixed31_32 y_b;
-	struct fixed31_32 y1_min;
-	struct fixed31_32 y3_max;
 
 	int32_t region_start, region_end;
 	int32_t i;
@@ -441,9 +513,9 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE();
+	PERF_TRACE_CTX(output_tf->ctx);
 
-	arr_points = lut_params->arr_points;
+	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
 
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_start));
-	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+	corner_points[0].green.x = corner_points[0].red.x;
+	corner_points[0].blue.x = corner_points[0].red.x;
+	corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 					     dc_fixpt_from_int(region_end));
+	corner_points[1].green.x = corner_points[1].red.x;
+	corner_points[1].blue.x = corner_points[1].red.x;
 
-	y_r = rgb_resulted[0].red;
-	y_g = rgb_resulted[0].green;
-	y_b = rgb_resulted[0].blue;
-
-	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
-	arr_points[0].y = y1_min;
-	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
-	y_r = rgb_resulted[hw_points - 1].red;
-	y_g = rgb_resulted[hw_points - 1].green;
-	y_b = rgb_resulted[hw_points - 1].blue;
+	corner_points[0].red.y = rgb_resulted[0].red;
+	corner_points[0].green.y = rgb_resulted[0].green;
+	corner_points[0].blue.y = rgb_resulted[0].blue;
 
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
-	arr_points[1].y = y3_max;
-
-	arr_points[1].slope = dc_fixpt_zero;
+	corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+	corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+	corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+	corner_points[1].red.slope = dc_fixpt_zero;
+	corner_points[1].green.slope = dc_fixpt_zero;
+	corner_points[1].blue.slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 		const struct fixed31_32 end_value =
 				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dc_fixpt_div(
-			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
-			dc_fixpt_sub(end_value, arr_points[1].x));
+		corner_points[1].red.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+			dc_fixpt_sub(end_value, corner_points[1].red.x));
+		corner_points[1].green.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+			dc_fixpt_sub(end_value, corner_points[1].green.x));
+		corner_points[1].blue.slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+			dc_fixpt_sub(end_value, corner_points[1].blue.x));
 	}
 
 	lut_params->hw_points_num = hw_points;
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 		++i;
 	}
 	cm_helper_convert_to_custom_float(rgb_resulted,
-						lut_params->arr_points,
+						lut_params->corner_points,
 						hw_points, false);
 
 	return true;
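
The arr_points to corner_points rework above replaces one shared pair of curve endpoints with per-channel endpoints, so (for example) PQ end slopes can differ per color. Judging from the accessors, curve_points3 bundles three per-channel points; a hedged standalone sketch of the shape, with plain doubles standing in for fixed31_32:

/* Stand-in for the per-channel endpoint layout implied above; the real
 * struct uses fixed31_32 and also carries custom_float_* fields. */
struct example_point  { double x, y, slope; };
struct example_point3 { struct example_point red, green, blue; };

/* Fill a start point the way cm_helper_translate_curve_to_hw_format()
 * does: one shared x, independent y per channel, slope = y / x. */
static void fill_start_point(struct example_point3 *p, double x,
			     double yr, double yg, double yb)
{
	p->red.x = p->green.x = p->blue.x = x;
	p->red.y   = yr; p->red.slope   = yr / x;
	p->green.y = yg; p->green.slope = yg / x;
	p->blue.y  = yb; p->blue.slope  = yb / x;
}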
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 7a531b0..5ae4d69 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func(
 
 bool cm_helper_convert_to_custom_float(
 		struct pwl_result_data *rgb_resulted,
-		struct curve_points *arr_points,
+		struct curve_points3 *corner_points,
 		uint32_t hw_points_num,
 		bool fixpoint);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 4254e7e..c7d1e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
 	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
 
-	return true ? false : enable;
+	return enable ? true : false;
 }
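
The hubbub change above is a pure logic fix: the old expression "true ? false : enable" always evaluated to false, so self-refresh was always reported as disabled. The replacement returns the register field as a bool; an equivalent idiom, shown standalone, would be:

#include <stdbool.h>
#include <stdint.h>

/* Equivalent to the fixed "return enable ? true : false;" */
static bool is_enabled(uint32_t enable)
{
	return enable != 0;
}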
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 74132a1..345af01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
 	return hubp_underflow;
 }
 
+
+void hubp1_clear_underflow(struct hubp *hubp)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
 static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -565,19 +573,6 @@ void hubp1_program_deadline(
 		REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
 		DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
 
-	if (REG(PREFETCH_SETTINS))
-		REG_SET_2(PREFETCH_SETTINS, 0,
-			DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
-			VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-	else
-		REG_SET_2(PREFETCH_SETTINGS, 0,
-			DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
-			VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-
-	REG_SET_2(VBLANK_PARAMETERS_0, 0,
-		DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
-		DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
-
 	REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
 		REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
 
@@ -585,9 +580,6 @@ void hubp1_program_deadline(
 	REG_SET(VBLANK_PARAMETERS_1, 0,
 		REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
 
-	REG_SET(VBLANK_PARAMETERS_3, 0,
-		REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
-
 	if (REG(NOM_PARAMETERS_0))
 		REG_SET(NOM_PARAMETERS_0, 0,
 			DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
@@ -602,27 +594,13 @@ void hubp1_program_deadline(
 	REG_SET(NOM_PARAMETERS_5, 0,
 		REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
 
-	REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
-		REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
-		REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
-
 	REG_SET_2(PER_LINE_DELIVERY, 0,
 		REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
 		REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
 
-	if (REG(PREFETCH_SETTINS_C))
-		REG_SET(PREFETCH_SETTINS_C, 0,
-			VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-	else
-		REG_SET(PREFETCH_SETTINGS_C, 0,
-			VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-
 	REG_SET(VBLANK_PARAMETERS_2, 0,
 		REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
 
-	REG_SET(VBLANK_PARAMETERS_4, 0,
-		REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
-
 	if (REG(NOM_PARAMETERS_2))
 		REG_SET(NOM_PARAMETERS_2, 0,
 			DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
@@ -642,10 +620,6 @@ void hubp1_program_deadline(
 		QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
 		QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
 
-	REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
-		MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
-		QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
-
 	/* TTU - per luma/chroma */
 	/* Assumed surf0 is luma and 1 is chroma */
 
@@ -654,25 +628,15 @@ void hubp1_program_deadline(
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
 
-	REG_SET(DCN_SURF0_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE,
-		ttu_attr->refcyc_per_req_delivery_pre_l);
-
 	REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
 		REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
 
-	REG_SET(DCN_SURF1_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE,
-		ttu_attr->refcyc_per_req_delivery_pre_c);
-
 	REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
 		REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
 		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
 		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
-	REG_SET(DCN_CUR0_TTU_CNTL1, 0,
-		REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
 }
 
 static void hubp1_setup(
@@ -690,6 +654,48 @@ static void hubp1_setup(
 	hubp1_vready_workaround(hubp, pipe_dest);
 }
 
+static void hubp1_setup_interdependent(
+		struct hubp *hubp,
+		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+		struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_SET_2(PREFETCH_SETTINS, 0,
+		DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+		VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+	REG_SET(PREFETCH_SETTINS_C, 0,
+		VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+	REG_SET_2(VBLANK_PARAMETERS_0, 0,
+		DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+		DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+	REG_SET(VBLANK_PARAMETERS_3, 0,
+		REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+	REG_SET(VBLANK_PARAMETERS_4, 0,
+		REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+	REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+	REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		ttu_attr->refcyc_per_req_delivery_pre_l);
+	REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		ttu_attr->refcyc_per_req_delivery_pre_c);
+	REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+
+	REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+		MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+		QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
 bool hubp1_is_flip_pending(struct hubp *hubp)
 {
 	uint32_t flip_pending = 0;
@@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 			hubp1_program_surface_config,
 	.hubp_is_flip_pending = hubp1_is_flip_pending,
 	.hubp_setup = hubp1_setup,
+	.hubp_setup_interdependent = hubp1_setup_interdependent,
 	.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
 	.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
 	.set_blank = hubp1_set_blank,
@@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_clk_cntl = hubp1_clk_cntl,
 	.hubp_vtg_sel = hubp1_vtg_sel,
 	.hubp_read_state = hubp1_read_state,
+	.hubp_clear_underflow = hubp1_clear_underflow,
 	.hubp_disable_control =  hubp1_disable_control,
 	.hubp_get_underflow_status = hubp1_get_underflow_status,
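
With hubp_clear_underflow now exposed through hubp_funcs, a caller can clear the sticky underflow bit per pipe. A hypothetical helper (not part of this series, DC-tree types assumed) following the same funcs-pointer guard style the recovery workaround below uses:

/* Hypothetical: clear sticky underflow on every active pipe, guarding
 * the optional func pointer the way the w/a code does. */
static void clear_all_underflows(struct dc *dc)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct hubp *hubp =
			dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp;

		if (hubp != NULL && hubp->funcs->hubp_clear_underflow)
			hubp->funcs->hubp_clear_underflow(hubp);
	}
}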
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4890273..62d4232 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -251,6 +251,7 @@
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
@@ -435,6 +436,7 @@
 	type HUBP_NO_OUTSTANDING_REQ;\
 	type HUBP_VTG_SEL;\
 	type HUBP_UNDERFLOW_STATUS;\
+	type HUBP_UNDERFLOW_CLEAR;\
 	type NUM_PIPES;\
 	type NUM_BANKS;\
 	type PIPE_INTERLEAVE;\
@@ -739,6 +741,7 @@ void dcn10_hubp_construct(
 	const struct dcn_mi_mask *hubp_mask);
 
 void hubp1_read_state(struct hubp *hubp);
+void hubp1_clear_underflow(struct hubp *hubp);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 193184a..0bd33a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,7 @@
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
 #include "dc_link_dp.h"
+#include "dccg.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 			&dc->current_state->res_ctx.pipe_ctx[i];
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
-			if (hubp != NULL) {
+			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
 					/* one pipe underflow, we will reset all the pipes*/
 					need_recover = true;
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 				hubp->funcs->set_hubp_blank_en(hubp, true);
 		}
 	}
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 				hubp->funcs->hubp_disable_control(hubp, true);
 		}
 	}
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->hubp_disable_control)
 				hubp->funcs->hubp_disable_control(hubp, true);
 		}
 	}
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 		if (pipe_ctx != NULL) {
 			hubp = pipe_ctx->plane_res.hubp;
 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
-			if (hubp != NULL)
+			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
 				hubp->funcs->set_hubp_blank_en(hubp, true);
 		}
 	}
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc)
 
 	enable_power_gating_plane(dc->hwseq, true);
 
-	memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+	memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
 }
 
 static void reset_hw_ctx_wrap(
@@ -1226,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 		tf = plane_state->in_transfer_func;
 
 	if (plane_state->gamma_correction &&
-		!plane_state->gamma_correction->is_identity
+		!dpp_base->ctx->dc->debug.always_use_regamma
+		&& !plane_state->gamma_correction->is_identity
 			&& dce_use_lut(plane_state->format))
 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
 
@@ -1399,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
 					grouped_pipes[i]->stream_res.tg,
-					grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+					0,
 					&grouped_pipes[i]->stream->triggered_crtc_reset);
 
 	DC_SYNC_INFO("Waiting for trigger\n");
@@ -1603,7 +1605,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
 }
 
 
-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 	struct vm_system_aperture_param apt = { {{ 0 } } };
@@ -1703,33 +1705,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
 }
 
-
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
-		enum dc_color_space colorspace,
-		uint16_t *matrix)
-{
-	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
-			if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
-	} else {
-		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
-			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
-	}
-}
-
 static void dcn10_program_output_csc(struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum dc_color_space colorspace,
 		uint16_t *matrix,
 		int opp_id)
 {
-	if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
-		program_csc_matrix(pipe_ctx,
-				colorspace,
-				matrix);
+	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+	} else {
+		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
+			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
+	}
 }
 
-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1738,7 +1729,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1747,7 +1738,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 	return false;
 }
 
-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
 {
 	if (pipe_ctx->plane_state->visible)
 		return true;
@@ -1780,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
 	}
 }
 
-static void dcn10_get_surface_visual_confirm_color(
+void dcn10_get_surface_visual_confirm_color(
 		const struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
 {
@@ -1816,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color(
 	}
 }
 
-static void dcn10_get_hdr_visual_confirm_color(
+void dcn10_get_hdr_visual_confirm_color(
 		struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
 {
@@ -1943,10 +1934,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	struct mpc *mpc = dc->res_pool->mpc;
 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-
-
-	/* TODO: proper fix once fpga works */
-
 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
 		dcn10_get_hdr_visual_confirm_color(
 				pipe_ctx, &blnd_cfg.black_color);
@@ -2026,8 +2013,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
 	bool per_pixel_alpha =
 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
 
-	/* TODO: proper fix once fpga works */
-
 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
 	/* scaler configuration */
@@ -2035,7 +2020,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
 }
 
-static void update_dchubp_dpp(
+void update_dchubp_dpp(
 	struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
 	struct dc_state *context)
@@ -2052,16 +2037,22 @@ static void update_dchubp_dpp(
 	 */
 	if (plane_state->update_flags.bits.full_update) {
 		bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
-				dc->res_pool->dccg->clks.dispclk_khz / 2;
+				dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
 
 		dpp->funcs->dpp_dppclk_control(
 				dpp,
 				should_divided_by_2,
 				true);
 
-		dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
-						dc->res_pool->dccg->clks.dispclk_khz / 2 :
-							dc->res_pool->dccg->clks.dispclk_khz;
+		if (dc->res_pool->dccg)
+			dc->res_pool->dccg->funcs->update_dpp_dto(
+					dc->res_pool->dccg,
+					dpp->inst,
+					pipe_ctx->plane_res.bw.calc.dppclk_khz);
+		else
+			dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+						dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+							dc->res_pool->clk_mgr->clks.dispclk_khz;
 	}
 
 	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2077,6 +2068,10 @@ static void update_dchubp_dpp(
 			&pipe_ctx->ttu_regs,
 			&pipe_ctx->rq_regs,
 			&pipe_ctx->pipe_dlg_param);
+		hubp->funcs->hubp_setup_interdependent(
+			hubp,
+			&pipe_ctx->dlg_regs,
+			&pipe_ctx->ttu_regs);
 	}
 
 	size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
@@ -2182,7 +2177,7 @@ static void dcn10_blank_pixel_data(
 	}
 }
 
-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 {
 	struct fixed31_32 multiplier = dc_fixpt_from_fraction(
 			pipe_ctx->plane_state->sdr_white_level, 80);
@@ -2257,47 +2252,7 @@ static void program_all_pipe_in_tree(
 	}
 }
 
-static void dcn10_pplib_apply_display_requirements(
-	struct dc *dc,
-	struct dc_state *context)
-{
-	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
-	pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-	pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
-	pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-	pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
-	pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
-	pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
-	dce110_fill_display_configs(context, pp_display_cfg);
-
-	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
-			struct dm_pp_display_configuration)) !=  0)
-		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
-	dc->prev_display_config = *pp_display_cfg;
-}
-
-static void optimize_shared_resources(struct dc *dc)
-{
-	if (dc->current_state->stream_count == 0) {
-		/* S0i2 message */
-		dcn10_pplib_apply_display_requirements(dc, dc->current_state);
-	}
-
-	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
-		dcn_bw_notify_pplib_of_wm_ranges(dc);
-}
-
-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
-{
-	/* S0i2 message */
-	if (dc->current_state->stream_count == 0 &&
-			context->stream_count != 0)
-		dcn10_pplib_apply_display_requirements(dc, context);
-}
-
-static struct pipe_ctx *find_top_pipe_for_stream(
+struct pipe_ctx *find_top_pipe_for_stream(
 		struct dc *dc,
 		struct dc_state *context,
 		const struct dc_stream_state *stream)
@@ -2387,6 +2342,32 @@ static void dcn10_apply_ctx_for_surface(
 
 	dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
 
+	if (top_pipe_to_program->plane_state &&
+			top_pipe_to_program->plane_state->update_flags.bits.full_update)
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			/* Skip inactive pipes and ones already updated */
+			if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+				continue;
+
+			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+				pipe_ctx->plane_res.hubp,
+				&pipe_ctx->dlg_regs,
+				&pipe_ctx->ttu_regs);
+		}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+			continue;
+
+		dcn10_pipe_control_lock(dc, pipe_ctx, false);
+	}
+
 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
 
@@ -2398,10 +2379,9 @@ static void dcn10_apply_ctx_for_surface(
 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
 }
 
-static void dcn10_set_bandwidth(
+static void dcn10_prepare_bandwidth(
 		struct dc *dc,
-		struct dc_state *context,
-		bool safe_to_lower)
+		struct dc_state *context)
 {
 	if (dc->debug.sanity_checks)
 		dcn10_verify_allow_pstate_change_high(dc);
@@ -2410,12 +2390,10 @@ static void dcn10_set_bandwidth(
 		if (context->stream_count == 0)
 			context->bw.dcn.clk.phyclk_khz = 0;
 
-		dc->res_pool->dccg->funcs->update_clocks(
-				dc->res_pool->dccg,
-				&context->bw.dcn.clk,
-				safe_to_lower);
-
-		dcn10_pplib_apply_display_requirements(dc, context);
+		dc->res_pool->clk_mgr->funcs->update_clocks(
+				dc->res_pool->clk_mgr,
+				context,
+				false);
 	}
 
 	hubbub1_program_watermarks(dc->res_pool->hubbub,
@@ -2423,6 +2401,38 @@ static void dcn10_set_bandwidth(
 			dc->res_pool->ref_clock_inKhz / 1000,
 			true);
 
+	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+		dcn_bw_notify_pplib_of_wm_ranges(dc);
+
+	if (dc->debug.sanity_checks)
+		dcn10_verify_allow_pstate_change_high(dc);
+}
+
+static void dcn10_optimize_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	if (dc->debug.sanity_checks)
+		dcn10_verify_allow_pstate_change_high(dc);
+
+	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+		if (context->stream_count == 0)
+			context->bw.dcn.clk.phyclk_khz = 0;
+
+		dc->res_pool->clk_mgr->funcs->update_clocks(
+				dc->res_pool->clk_mgr,
+				context,
+				true);
+	}
+
+	hubbub1_program_watermarks(dc->res_pool->hubbub,
+			&context->bw.dcn.watermarks,
+			dc->res_pool->ref_clock_inKhz / 1000,
+			true);
+
+	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+		dcn_bw_notify_pplib_of_wm_ranges(dc);
+
 	if (dc->debug.sanity_checks)
 		dcn10_verify_allow_pstate_change_high(dc);
 }
@@ -2694,7 +2704,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
 
 static const struct hw_sequencer_funcs dcn10_funcs = {
 	.program_gamut_remap = program_gamut_remap,
-	.program_csc_matrix = program_csc_matrix,
 	.init_hw = dcn10_init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
@@ -2721,7 +2730,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.disable_plane = dcn10_disable_plane,
 	.blank_pixel_data = dcn10_blank_pixel_data,
 	.pipe_control_lock = dcn10_pipe_control_lock,
-	.set_bandwidth = dcn10_set_bandwidth,
+	.prepare_bandwidth = dcn10_prepare_bandwidth,
+	.optimize_bandwidth = dcn10_optimize_bandwidth,
 	.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
 	.enable_stream_timing = dcn10_enable_stream_timing,
 	.set_drr = set_drr,
@@ -2731,11 +2741,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.set_avmute = dce110_set_avmute,
 	.log_hw_state = dcn10_log_hw_state,
 	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-	.ready_shared_resources = ready_shared_resources,
-	.optimize_shared_resources = optimize_shared_resources,
-	.pplib_apply_display_requirements =
-			dcn10_pplib_apply_display_requirements,
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
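
The set_bandwidth(dc, context, safe_to_lower) hook is split in two above: prepare_bandwidth raises clocks before a new state is programmed (update_clocks called with safe_to_lower = false), and optimize_bandwidth lowers them once the state is live (safe_to_lower = true). A minimal standalone sketch of the intended call order — the stub types here are illustrative placeholders, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

struct dc_state { int dispclk_khz; };
struct dc { int cur_dispclk_khz; };

/* safe_to_lower = false: only ever raise, never drop below current */
static void update_clocks(struct dc *dc, struct dc_state *ctx, bool safe_to_lower)
{
	if (safe_to_lower || ctx->dispclk_khz > dc->cur_dispclk_khz)
		dc->cur_dispclk_khz = ctx->dispclk_khz;
}

static void prepare_bandwidth(struct dc *dc, struct dc_state *ctx)
{
	update_clocks(dc, ctx, false);	/* raise before programming pipes */
}

static void optimize_bandwidth(struct dc *dc, struct dc_state *ctx)
{
	update_clocks(dc, ctx, true);	/* safe to lower once state is live */
}

int main(void)
{
	struct dc dc = { .cur_dispclk_khz = 600000 };
	struct dc_state new_state = { .dispclk_khz = 400000 };

	prepare_bandwidth(&dc, &new_state);	/* stays at 600000 */
	/* ... program surfaces/timings for new_state here ... */
	optimize_bandwidth(&dc, &new_state);	/* drops to 400000 */
	printf("dispclk now %d kHz\n", dc.cur_dispclk_khz);
	return 0;
}
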
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 84d461e..f8eea10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,4 +51,34 @@ void dcn10_get_hw_state(
 		char *pBuf, unsigned int bufSize,
 		unsigned int mask);
 
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
+
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+
+void dcn10_get_surface_visual_confirm_color(
+		const struct pipe_ctx *pipe_ctx,
+		struct tg_color *color);
+
+void dcn10_get_hdr_visual_confirm_color(
+		struct pipe_ctx *pipe_ctx,
+		struct tg_color *color);
+
+void update_dchubp_dpp(
+	struct dc *dc,
+	struct pipe_ctx *pipe_ctx,
+	struct dc_state *context);
+
+struct pipe_ctx *find_top_pipe_for_stream(
+		struct dc *dc,
+		struct dc_state *context,
+		const struct dc_stream_state *stream);
+
 #endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 6415890..cd46901 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -44,6 +44,7 @@
 #include "dcn10_hubp.h"
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
 
 static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
 {
@@ -454,12 +455,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 
 			remaining_buffer -= chars_printed;
 			pBuf += chars_printed;
-
-			// Clear underflow for debug purposes
-			// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
-			// This function is called only from Windows or Diags test environment, hence it's safe to clear
-			// it from here without affecting the original intent.
-			tg->funcs->clear_optc_underflow(tg);
 		}
 	}
 
@@ -469,19 +464,75 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
 {
 	unsigned int chars_printed = 0;
+	unsigned int remaining_buffer = bufSize;
 
-	chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
-		"dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
-		"%d,%d,%d,%d,%d,%d,%d\n",
+	chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+		"dppclk,fclk,socclk\n"
+		"%d,%d,%d,%d,%d,%d\n",
 		dc->current_state->bw.dcn.clk.dcfclk_khz,
 		dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
 		dc->current_state->bw.dcn.clk.dispclk_khz,
 		dc->current_state->bw.dcn.clk.dppclk_khz,
-		dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
 		dc->current_state->bw.dcn.clk.fclk_khz,
 		dc->current_state->bw.dcn.clk.socclk_khz);
 
-	return chars_printed;
+	remaining_buffer -= chars_printed;
+	pBuf += chars_printed;
+
+	return bufSize - remaining_buffer;
+}
+
+static void dcn10_clear_otpc_underflow(struct dc *dc)
+{
+	struct resource_pool *pool = dc->res_pool;
+	int i;
+
+	for (i = 0; i < pool->timing_generator_count; i++) {
+		struct timing_generator *tg = pool->timing_generators[i];
+		struct dcn_otg_state s = {0};
+
+		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+		if (s.otg_enabled & 1)
+			tg->funcs->clear_optc_underflow(tg);
+	}
+}
+
+static void dcn10_clear_hubp_underflow(struct dc *dc)
+{
+	struct resource_pool *pool = dc->res_pool;
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct hubp *hubp = pool->hubps[i];
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+		hubp->funcs->hubp_read_state(hubp);
+
+		if (!s->blank_en)
+			hubp->funcs->hubp_clear_underflow(hubp);
+	}
+}
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
+{
+	/*
+	 *  Mask Format
+	 *  Bit 0 - 31: Status bit to clear
+	 *
+	 *  Mask = 0x0 means clear all status bits
+	 */
+	const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW	= 0x1;
+	const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW	= 0x2;
+
+	if (mask == 0x0)
+		mask = 0xFFFFFFFF;
+
+	if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
+		dcn10_clear_hubp_underflow(dc);
+
+	if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
+		dcn10_clear_otpc_underflow(dc);
 }
 
 void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
@@ -491,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
 	 *  Bit 0 - 14: Hardware block mask
 	 *  Bit 15: 1 = Invariant Only, 0 = All
 	 */
-	const unsigned int DC_HW_STATE_MASK_HUBBUB 	= 0x1;
-	const unsigned int DC_HW_STATE_MASK_HUBP 	= 0x2;
-	const unsigned int DC_HW_STATE_MASK_RQ 		= 0x4;
-	const unsigned int DC_HW_STATE_MASK_DLG 	= 0x8;
-	const unsigned int DC_HW_STATE_MASK_TTU 	= 0x10;
-	const unsigned int DC_HW_STATE_MASK_CM 		= 0x20;
-	const unsigned int DC_HW_STATE_MASK_MPCC 	= 0x40;
-	const unsigned int DC_HW_STATE_MASK_OTG 	= 0x80;
-	const unsigned int DC_HW_STATE_MASK_CLOCKS 	= 0x100;
-	const unsigned int DC_HW_STATE_INVAR_ONLY	= 0x8000;
+	const unsigned int DC_HW_STATE_MASK_HUBBUB			= 0x1;
+	const unsigned int DC_HW_STATE_MASK_HUBP			= 0x2;
+	const unsigned int DC_HW_STATE_MASK_RQ				= 0x4;
+	const unsigned int DC_HW_STATE_MASK_DLG				= 0x8;
+	const unsigned int DC_HW_STATE_MASK_TTU				= 0x10;
+	const unsigned int DC_HW_STATE_MASK_CM				= 0x20;
+	const unsigned int DC_HW_STATE_MASK_MPCC			= 0x40;
+	const unsigned int DC_HW_STATE_MASK_OTG				= 0x80;
+	const unsigned int DC_HW_STATE_MASK_CLOCKS			= 0x100;
+	const unsigned int DC_HW_STATE_INVAR_ONLY			= 0x8000;
 
 	unsigned int chars_printed = 0;
 	unsigned int remaining_buf_size = bufSize;
@@ -556,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
 		remaining_buf_size -= chars_printed;
 	}
 
-	if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+	if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
 		chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+		pBuf += chars_printed;
+		remaining_buf_size -= chars_printed;
+	}
 }
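
dcn10_clear_status_bits() above takes a bit mask selecting which sticky underflow status to clear: bit 0 for HUBP, bit 1 for OTG/OPTC, and 0x0 as shorthand for "clear everything". A standalone sketch of the convention (the printf calls stand in for the real per-block clear helpers):

#include <stdio.h>

#define DC_HW_STATE_MASK_HUBP_UNDERFLOW	0x1
#define DC_HW_STATE_MASK_OTPC_UNDERFLOW	0x2

static void clear_status_bits(unsigned int mask)
{
	if (mask == 0x0)	/* 0 means clear all status bits */
		mask = 0xFFFFFFFF;

	if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
		printf("clear sticky HUBP underflow on unblanked pipes\n");
	if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
		printf("clear sticky OTG/OPTC underflow on enabled OTGs\n");
}

int main(void)
{
	clear_status_bits(DC_HW_STATE_MASK_OTPC_UNDERFLOW);	/* OTG only */
	clear_status_bits(0);					/* clear all */
	return 0;
}
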
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ba6a8686..477ab92 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
 		return false;
 
 	/* DCE11 HW does not support 420 */
-	if (!enc10->base.features.ycbcr420_supported &&
+	if (!enc10->base.features.hdmi_ycbcr420_supported &&
 			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		return false;
 
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
 	const struct dcn10_link_encoder *enc10,
 	const struct dc_crtc_timing *crtc_timing)
 {
-	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-		return false;
+	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+		if (!enc10->base.features.dp_ycbcr420_supported)
+			return false;
+	}
 
 	return true;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 5462668..7c13861 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc)
 	REG_SET(OTG_STEREO_CONTROL, 0,
 		OTG_STEREO_EN, 0);
 
-	REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+	REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
 		OTG_3D_STRUCTURE_EN, 0,
-		OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
 		OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
@@ -274,10 +273,12 @@ void optc1_program_timing(
 	 * program the reg for interrupt position.
 	 */
 	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-	if (vertical_line_start < 0) {
-		ASSERT(0);
+	v_fp2 = 0;
+	if (vertical_line_start < 0)
+		v_fp2 = -vertical_line_start;
+	if (vertical_line_start < 0)
 		vertical_line_start = 0;
-	}
+
 	REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
 			OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
 
@@ -296,9 +297,6 @@ void optc1_program_timing(
 		if (patched_crtc_timing.flags.INTERLACE == 1)
 			field_num = 1;
 	}
-	v_fp2 = 0;
-	if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
-		v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
 
 	/* Interlace */
 	if (patched_crtc_timing.flags.INTERLACE == 1) {
@@ -337,9 +335,8 @@ void optc1_program_timing(
 	/* Enable stereo - only when we need to pack 3D frame. Other types
 	 * of stereo handled in explicit call
 	 */
-	h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
-			1 : 0;
 
+	h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
 	REG_UPDATE(OTG_H_TIMING_CNTL,
 			OTG_H_TIMING_DIV_BY2, h_div_2);
 
@@ -362,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
 static void optc1_unblank_crtc(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	uint32_t vertical_interrupt_enable = 0;
-
-	REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
-			OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
-
-	/* temporary work around for vertical interrupt, once vertical interrupt enabled,
-	 * this check will be removed.
-	 */
-	if (vertical_interrupt_enable)
-		optc1_set_blank_data_double_buffer(optc, true);
 
 	REG_UPDATE_2(OTG_BLANK_CONTROL,
 			OTG_BLANK_DATA_EN, 0,
 			OTG_BLANK_DE_MODE, 0);
+
+	/* W/A for automated testing
+	 * Automated testing will fail the underflow test, as there
+	 * are sporadic underflows which occur during the optc blank
+	 * sequence.  As a w/a, clear underflow on unblank.
+	 * This prevents the failure, but will not mask actual
+	 * underflows that affect real use cases.
+	 */
+	optc1_clear_optc_underflow(optc);
 }
 
 /**
@@ -1155,9 +1151,8 @@ static void optc1_enable_stereo(struct timing_generator *optc,
 				OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
 
 		if (flags->PROGRAM_STEREO)
-			REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+			REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
 				OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
-				OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
 				OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
 
 	}
@@ -1425,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1)
 	optc1->min_h_sync_width = 8;
 	optc1->min_v_sync_width = 1;
 }
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+	return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+}
+
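
optc1_is_two_pixels_per_containter() names the condition that previously set h_div_2 directly: on DCN1 only YCbCr 4:2:0 carries two pixels per pixel-clock container, in which case the OTG horizontal timing is programmed divided by two (OTG_H_TIMING_DIV_BY2). A small sketch with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

enum pixel_encoding { PIXEL_ENCODING_RGB, PIXEL_ENCODING_YCBCR420 };
struct dc_crtc_timing { enum pixel_encoding pixel_encoding; int h_total; };

/* Mirrors the new predicate: only 4:2:0 packs two pixels per container */
static bool two_pixels_per_container(const struct dc_crtc_timing *t)
{
	return t->pixel_encoding == PIXEL_ENCODING_YCBCR420;
}

int main(void)
{
	struct dc_crtc_timing t = { PIXEL_ENCODING_YCBCR420, 4400 };

	/* OTG horizontal timing runs at the container rate, hence DIV_BY2 */
	printf("h_total programmed as %d\n",
	       two_pixels_per_container(&t) ? t.h_total / 2 : t.h_total);
	return 0;
}
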
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c1b1142..8bacf0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc,
 bool optc1_get_crc(struct timing_generator *optc,
 		    uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
 
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a71453a..5d4772d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,23 +28,23 @@
 
 #include "resource.h"
 #include "include/irq_service_interface.h"
-#include "dcn10/dcn10_resource.h"
+#include "dcn10_resource.h"
 
-#include "dcn10/dcn10_ipp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_ipp.h"
+#include "dcn10_mpc.h"
 #include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10/dcn10_dpp.h"
+#include "dcn10_dpp.h"
 #include "dcn10_optc.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
 #include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_opp.h"
-#include "dcn10/dcn10_link_encoder.h"
-#include "dcn10/dcn10_stream_encoder.h"
-#include "dce/dce_clocks.h"
+#include "dcn10_opp.h"
+#include "dcn10_link_encoder.h"
+#include "dcn10_stream_encoder.h"
+#include "dcn10_clk_mgr.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
-#include "../virtual/virtual_stream_encoder.h"
+#include "virtual/virtual_stream_encoder.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
 #include "dcn10_hubp.h"
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
 #define MMHUB_SR(reg_name)\
 		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
 					mm ## reg_name
-
 /* macros to expand register list macros defined in HW object header file
  * end *********************/
 
@@ -436,8 +435,8 @@ static const struct dcn_optc_mask tg_mask = {
 	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
 };
 
-
 static const struct bios_registers bios_regs = {
+		NBIO_SR(BIOS_SCRATCH_0),
 		NBIO_SR(BIOS_SCRATCH_3),
 		NBIO_SR(BIOS_SCRATCH_6)
 };
@@ -496,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
 		CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
 };
 
-
 static const struct resource_caps res_cap = {
 		.num_timing_generator = 4,
 		.num_opp = 4,
@@ -719,7 +717,8 @@ static struct timing_generator *dcn10_timing_generator_create(
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
 		.max_hdmi_pixel_clock = 600000,
-		.ycbcr420_supported = true,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = false,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -949,8 +948,8 @@ static void destruct(struct dcn10_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
-	if (pool->base.dccg != NULL)
-		dce_dccg_destroy(&pool->base.dccg);
+	if (pool->base.clk_mgr != NULL)
+		dce_clk_mgr_destroy(&pool->base.clk_mgr);
 
 	kfree(pool->base.pp_smu);
 }
@@ -1275,9 +1274,8 @@ static bool construct(
 			goto fail;
 		}
 	}
-
-	pool->base.dccg = dcn1_dccg_create(ctx);
-	if (pool->base.dccg == NULL) {
+	pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
+	if (pool->base.clk_mgr == NULL) {
 		dm_error("DC: failed to create display clock!\n");
 		BREAK_TO_DEBUGGER();
 		goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 6f9078f..b8b5525 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank(
 	struct stream_encoder *enc)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t retries = 0;
 	uint32_t  reg1 = 0;
 	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
 
@@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank(
 			0,
 			10, max_retries);
 
-	ASSERT(retries <= max_retries);
-
 	/* Tell the DP encoder to ignore timing from CRTC, must be done after
 	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
 	 * complete, stream status will be stuck in video stream enabled state,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
index 34a701c..65663f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
@@ -33,6 +33,7 @@
 
 #define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
 #define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index f2ea845..0029a39 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -55,10 +55,10 @@ struct pp_smu {
 
 struct pp_smu_wm_set_range {
 	unsigned int wm_inst;
-	uint32_t min_fill_clk_khz;
-	uint32_t max_fill_clk_khz;
-	uint32_t min_drain_clk_khz;
-	uint32_t max_drain_clk_khz;
+	uint32_t min_fill_clk_mhz;
+	uint32_t max_fill_clk_mhz;
+	uint32_t min_drain_clk_mhz;
+	uint32_t max_drain_clk_mhz;
 };
 
 #define MAX_WATERMARK_SETS 4
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv {
 	 */
 	unsigned int display_count;
 
-	/* PPSMC_MSG_SetHardMinFclkByFreq: khz
+	/* PPSMC_MSG_SetHardMinFclkByFreq: mhz
 	 *  FCLK will vary with DPM, but never below requested hard min
 	 */
-	unsigned int hard_min_fclk_khz;
+	unsigned int hard_min_fclk_mhz;
 
-	/* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+	/* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
 	 *  fixed clock at requested freq, either from FCH bypass or DFS
 	 */
-	unsigned int hard_min_dcefclk_khz;
+	unsigned int hard_min_dcefclk_mhz;
 
 	/* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
 	 *  when DF is in cstate, dcf clock is further divided down
@@ -102,14 +102,20 @@ struct pp_smu_funcs_rv {
 	 */
 	void (*set_display_count)(struct pp_smu *pp, int count);
 
-	/* which SMU message?  are reader and writer WM separate SMU msg? */
+	/* Reader and writer WMs are sent together as part of one table:
+	 *
+	 * PPSMC_MSG_SetDriverDramAddrHigh
+	 * PPSMC_MSG_SetDriverDramAddrLow
+	 * PPSMC_MSG_TransferTableDram2Smu
+	 */
 	void (*set_wm_ranges)(struct pp_smu *pp,
 			struct pp_smu_wm_range_sets *ranges);
 
 	/* PPSMC_MSG_SetHardMinDcfclkByFreq
 	 * fixed clock at requested freq, either from FCH bypass or DFS
 	 */
-	void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PPSMC_MSG_SetMinDeepSleepDcfclk
 	 * when DF is in cstate, dcf clock is further divided down
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv {
 	/* PPSMC_MSG_SetHardMinFclkByFreq
 	 * FCLK will vary with DPM, but never below requested hard min
 	 */
-	void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PPSMC_MSG_SetHardMinSocclkByFreq
 	 * Needed for DWB support
 	 */
-	void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+	void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
 
 	/* PME w/a */
 	void (*set_pme_wa_enable)(struct pp_smu *pp);
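
The pp_smu Raven hooks now take MHz instead of kHz, matching the granularity of the underlying PPSMC messages, so callers that keep clocks in kHz must convert at this boundary. A hypothetical sketch (the round-up policy is our assumption, chosen so a hard minimum is never understated):

#include <stdio.h>

struct pp_smu;
struct pp_smu_funcs_rv {
	void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
};

static void set_fclk_mhz(struct pp_smu *pp, int mhz)
{
	(void)pp;
	printf("PPSMC_MSG_SetHardMinFclkByFreq %d MHz\n", mhz);
}

static void request_hard_min_fclk(const struct pp_smu_funcs_rv *funcs,
				  struct pp_smu *pp, int fclk_khz)
{
	/* round up so the requested hard minimum is never understated */
	funcs->set_hard_min_fclk_by_freq(pp, (fclk_khz + 999) / 1000);
}

int main(void)
{
	struct pp_smu_funcs_rv funcs = {
		.set_hard_min_fclk_by_freq = set_fclk_mhz,
	};

	request_hard_min_fclk(&funcs, NULL, 1200500);	/* -> 1201 MHz */
	return 0;
}
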
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 28128c0..1961cc6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -31,6 +31,8 @@
 
 #define __DM_SERVICES_H__
 
+#include "amdgpu_dm_trace.h"
+
 /* TODO: remove when DC is complete. */
 #include "dm_services_types.h"
 #include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
 	}
 #endif
 	value = cgs_read_register(ctx->cgs_device, address);
+	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
 
 	return value;
 }
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
 	}
 #endif
 	cgs_write_register(ctx->cgs_device, address, value);
+	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
 }
 
 static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
 /*
  * performance tracing
  */
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE()	dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE()	trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+		CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+		&CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX)	trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+		__CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+		&__CTX->perf_trace->last_entry_write, __func__, __LINE__)
 
 
 /*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 2b83f92..1af8c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -208,22 +208,20 @@ struct dm_bl_data_point {
 		/* Brightness level as effective value in range 0-255,
 		 * corresponding to above percentage
 		 */
-		uint8_t signalLevel;
+		uint8_t signal_level;
 };
 
 /* Total size of the structure should not exceed 256 bytes */
 struct dm_acpi_atif_backlight_caps {
-
-
 	uint16_t size; /* Bytes 0-1 (2 bytes) */
 	uint16_t flags; /* Bytes 2-3 (2 bytes) */
-	uint8_t  errorCode; /* Byte 4 */
-	uint8_t  acLevelPercentage; /* Byte 5 */
-	uint8_t  dcLevelPercentage; /* Byte 6 */
-	uint8_t  minInputSignal; /* Byte 7 */
-	uint8_t  maxInputSignal; /* Byte 8 */
-	uint8_t  numOfDataPoints; /* Byte 9 */
-	struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+	uint8_t  error_code; /* Byte 4 */
+	uint8_t  ac_level_percentage; /* Byte 5 */
+	uint8_t  dc_level_percentage; /* Byte 6 */
+	uint8_t  min_input_signal; /* Byte 7 */
+	uint8_t  max_input_signal; /* Byte 8 */
+	uint8_t  num_data_points; /* Byte 9 */
+	struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
 };
 
 enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cbafce6..5dd0452 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st {
 	int use_urgent_burst_bw;
 	double max_hscl_ratio;
 	double max_vscl_ratio;
-	struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+	unsigned int num_states;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[8];
 };
 
 struct _vcs_dpi_ip_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f20161c..dada042 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
 	struct dc_context *ctx)
 {
 	struct gpio_service *service;
-
 	uint32_t index_of_id;
 
 	service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
 		goto failure_1;
 	}
 
-	/* allocate and initialize business storage */
+	/* allocate and initialize busyness storage */
 	{
-		const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
 		index_of_id = 0;
 		service->ctx = ctx;
 
 		do {
 			uint32_t number_of_bits =
 				service->factory.number_of_pins[index_of_id];
+			uint32_t i = 0;
 
-			uint32_t number_of_uints =
-				(number_of_bits + bits_per_uint - 1) /
-				bits_per_uint;
+			if (number_of_bits) {
+				service->busyness[index_of_id] =
+					kcalloc(number_of_bits, sizeof(char),
+						GFP_KERNEL);
 
-			uint32_t *slot;
-
-			if (number_of_bits) {
-				uint32_t index_of_uint = 0;
-
-				slot = kcalloc(number_of_uints,
-					       sizeof(uint32_t),
-					       GFP_KERNEL);
-
-				if (!slot) {
+				if (!service->busyness[index_of_id]) {
 					BREAK_TO_DEBUGGER();
 					goto failure_2;
 				}
 
 				do {
-					slot[index_of_uint] = 0;
-
-					++index_of_uint;
-				} while (index_of_uint < number_of_uints);
-			} else
-				slot = NULL;
-
-			service->busyness[index_of_id] = slot;
+					service->busyness[index_of_id][i] = 0;
+					++i;
+				} while (i < number_of_bits);
+			} else {
+				service->busyness[index_of_id] = NULL;
+			}
 
 			++index_of_id;
 		} while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
 
 failure_2:
 	while (index_of_id) {
-		uint32_t *slot;
-
 		--index_of_id;
-
-		slot = service->busyness[index_of_id];
-
-		kfree(slot);
+		kfree(service->busyness[index_of_id]);
 	}
 
 failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
 		uint32_t index_of_id = 0;
 
 		do {
-			uint32_t *slot = (*ptr)->busyness[index_of_id];
-
-			kfree(slot);
+			kfree((*ptr)->busyness[index_of_id]);
 
 			++index_of_id;
 		} while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
 	enum gpio_id id,
 	uint32_t en)
 {
-	const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-	const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
-	return 0 != (*slot & (1 << (en % bits_per_uint)));
+	return service->busyness[id][en];
 }
 
 static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
 	enum gpio_id id,
 	uint32_t en)
 {
-	const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-	service->busyness[id][en / bits_per_uint] |=
-		(1 << (en % bits_per_uint));
+	service->busyness[id][en] = true;
 }
 
 static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
 	enum gpio_id id,
 	uint32_t en)
 {
-	const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-	service->busyness[id][en / bits_per_uint] &=
-		~(1 << (en % bits_per_uint));
+	service->busyness[id][en] = false;
 }
 
 enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index c7f3081..1d501a4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -36,10 +36,9 @@ struct gpio_service {
 	/*
 	 * @brief
 	 * Busyness storage.
-	 * For each member of 'enum gpio_id',
-	 * store array of bits (packed into uint32_t slots),
-	 * index individual bit by 'en' value */
-	uint32_t *busyness[GPIO_ID_COUNT];
+	 * one byte for each member of 'enum gpio_id'
+	 */
+	char *busyness[GPIO_ID_COUNT];
 };
 
 enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
index 39ee8eba3..d1656c9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
 static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
 {
 	struct bw_fixed res;
-	div64_u64_rem(arg1.value, arg2.value, &res.value);
+	div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
 	return res;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
index bcb18f5..7a147a9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -77,6 +77,7 @@ struct compressor_funcs {
 };
 struct compressor {
 	struct dc_context *ctx;
+	/* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
 	uint32_t attached_inst;
 	bool is_enabled;
 	const struct compressor_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c1976c1..b168a5e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
 
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
 /********** DAL Core*********************/
-#include "display_clock.h"
+#include "hw/clk_mgr.h"
 #include "transform.h"
 #include "dpp.h"
 
@@ -169,6 +169,7 @@ struct resource_pool {
 	unsigned int audio_count;
 	struct audio_support audio_support;
 
+	struct clk_mgr *clk_mgr;
 	struct dccg *dccg;
 	struct irq_service *irqs;
 
@@ -271,6 +272,17 @@ union bw_context {
 	struct dce_bw_output dce;
 };
 
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: non-stack memory to support bandwidth calculations
+ *
+ */
 struct dc_state {
 	struct dc_stream_state *streams[MAX_PIPES];
 	struct dc_stream_status stream_status[MAX_PIPES];
@@ -278,7 +290,6 @@ struct dc_state {
 
 	struct resource_context res_ctx;
 
-	/* The output from BW and WM calculations. */
 	union bw_context bw;
 
 	/* Note: these are big structures, do *not* put on stack! */
@@ -287,7 +298,7 @@ struct dc_state {
 	struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
-	struct dccg *dis_clk;
+	struct clk_mgr *dccg;
 
 	struct kref refcount;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index e688eb9..ece954a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,8 +31,8 @@
 #define __DCN_CALCS_H__
 
 #include "bw_fixed.h"
-#include "display_clock.h"
 #include "../dml/display_mode_lib.h"
+#include "hw/clk_mgr.h"
 
 struct dc;
 struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index a83a484..abc961c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -47,12 +47,18 @@ struct abm_funcs {
 	bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
 	bool (*set_abm_immediate_disable)(struct abm *abm);
 	bool (*init_backlight)(struct abm *abm);
-	bool (*set_backlight_level)(struct abm *abm,
-			unsigned int backlight_level,
+
+	/* backlight_pwm_u16_16 is unsigned 32 bit,
+	 * 16 bit integer + 16 bit fractional, where 1.0 is max backlight value.
+	 */
+	bool (*set_backlight_level_pwm)(struct abm *abm,
+			unsigned int backlight_pwm_u16_16,
 			unsigned int frame_ramp,
 			unsigned int controller_id,
 			bool use_smooth_brightness);
-	unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+
+	unsigned int (*get_current_backlight)(struct abm *abm);
+	unsigned int (*get_target_backlight)(struct abm *abm);
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
similarity index 63%
rename from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
rename to drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 689faa1..23a4b18 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -23,41 +23,25 @@
  *
  */
 
-#ifndef __DISPLAY_CLOCK_H__
-#define __DISPLAY_CLOCK_H__
+#ifndef __DAL_CLK_MGR_H__
+#define __DAL_CLK_MGR_H__
 
 #include "dm_services_types.h"
 #include "dc.h"
 
-/* Structure containing all state-dependent clocks
- * (dependent on "enum clocks_state") */
-struct state_dependent_clocks {
-	int display_clk_khz;
-	int pixel_clk_khz;
-};
-
-struct dccg {
+struct clk_mgr {
 	struct dc_context *ctx;
-	const struct display_clock_funcs *funcs;
+	const struct clk_mgr_funcs *funcs;
 
-	enum dm_pp_clocks_state max_clks_state;
-	enum dm_pp_clocks_state cur_min_clks_state;
 	struct dc_clocks clks;
 };
 
-struct display_clock_funcs {
-	void (*update_clocks)(struct dccg *dccg,
-			struct dc_clocks *new_clocks,
+struct clk_mgr_funcs {
+	void (*update_clocks)(struct clk_mgr *clk_mgr,
+			struct dc_state *context,
 			bool safe_to_lower);
-	int (*set_dispclk)(struct dccg *dccg,
-		int requested_clock_khz);
 
-	int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
-
-	bool (*update_dfs_bypass)(struct dccg *dccg,
-		struct dc *dc,
-		struct dc_state *context,
-		int requested_clock_khz);
+	int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
 };
 
-#endif /* __DISPLAY_CLOCK_H__ */
+#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
new file mode 100644
index 0000000..95a56d0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCCG_H__
+#define __DAL_DCCG_H__
+
+#include "dc_types.h"
+
+struct dccg {
+	struct dc_context *ctx;
+	const struct dccg_funcs *funcs;
+
+	int ref_dppclk;
+};
+
+struct dccg_funcs {
+	void (*update_dpp_dto)(struct dccg *dccg,
+			int dpp_inst,
+			int req_dppclk);
+};
+
+#endif //__DAL_DCCG_H__
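
With clock management moved into clk_mgr, the dccg object introduced here is reduced to per-DPP divider (DTO) programming. A hypothetical usage sketch against the new interface (the wrapper and printf body are ours, not the kernel implementation):

#include <stdio.h>

struct dccg;
struct dccg_funcs {
	void (*update_dpp_dto)(struct dccg *dccg, int dpp_inst, int req_dppclk);
};
struct dccg { const struct dccg_funcs *funcs; int ref_dppclk; };

static void update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
	/* a DTO divides ref_dppclk down to the per-pipe request */
	printf("DPP%d DTO: %d -> %d kHz\n",
	       dpp_inst, dccg->ref_dppclk, req_dppclk);
}

static const struct dccg_funcs dccg_funcs = {
	.update_dpp_dto = update_dpp_dto,
};

int main(void)
{
	struct dccg dccg = { .funcs = &dccg_funcs, .ref_dppclk = 600000 };

	dccg.funcs->update_dpp_dto(&dccg, 0, 300000);	/* pipe 0 at half rate */
	return 0;
}
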
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747..cb85eaa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
 	DMCU_RUNNING = 1
 };
 
+struct dmcu_version {
+	unsigned int date;
+	unsigned int month;
+	unsigned int year;
+	unsigned int interface_version;
+};
+
 struct dmcu {
 	struct dc_context *ctx;
 	const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 334c48c..04c6989 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,11 @@ struct hubp_funcs {
 			struct _vcs_dpi_display_rq_regs_st *rq_regs,
 			struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
 
+	void (*hubp_setup_interdependent)(
+			struct hubp *hubp,
+			struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+			struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
 	void (*dcc_control)(struct hubp *hubp, bool enable,
 			bool independent_64b_blks);
 	void (*mem_program_viewport)(
@@ -121,6 +126,7 @@ struct hubp_funcs {
 	void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
 	void (*hubp_read_state)(struct hubp *hubp);
+	void (*hubp_clear_underflow)(struct hubp *hubp);
 	void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
 	unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433e..da85537 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -53,6 +53,12 @@ struct curve_points {
 	uint32_t custom_float_slope;
 };
 
+struct curve_points3 {
+	struct curve_points red;
+	struct curve_points green;
+	struct curve_points blue;
+};
+
 struct pwl_result_data {
 	struct fixed31_32 red;
 	struct fixed31_32 green;
@@ -71,9 +77,17 @@ struct pwl_result_data {
 	uint32_t delta_blue_reg;
 };
 
+/* arr_curve_points - regamma regions/segments specification
+ * arr_points - beginning and end point specified separately (only one on DCE)
+ * corner_points - beginning and end point for all 3 colors (DCN)
+ * rgb_resulted - final curve
+ */
 struct pwl_params {
 	struct gamma_curve arr_curve_points[34];
-	struct curve_points arr_points[2];
+	union {
+		struct curve_points arr_points[2];
+		struct curve_points3 corner_points[2];
+	};
 	struct pwl_result_data rgb_resulted[256 + 3];
 	uint32_t hw_points_num;
 };
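
The anonymous union above lets existing DCE code keep addressing the single-channel arr_points begin/end pair while DCN gamma code programs independent per-channel corner points through the same field. A simplified stand-in showing the aliasing (types reduced for illustration):

#include <stdio.h>

struct curve_points { int x, y; };		/* simplified stand-in */
struct curve_points3 { struct curve_points red, green, blue; };

struct pwl_params_view {
	union {
		struct curve_points arr_points[2];	/* DCE: one channel */
		struct curve_points3 corner_points[2];	/* DCN: per channel */
	};
};

int main(void)
{
	struct pwl_params_view p = { 0 };

	p.corner_points[0].red.x = 42;		/* DCN view */
	printf("%d\n", p.arr_points[0].x);	/* same storage, DCE view */
	return 0;
}
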
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index e28e977..c20fdca 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -65,7 +65,8 @@ struct encoder_feature_support {
 
 	enum dc_color_depth max_hdmi_deep_color;
 	unsigned int max_hdmi_pixel_clock;
-	bool ycbcr420_supported;
+	bool hdmi_ycbcr420_supported;
+	bool dp_ycbcr420_supported;
 };
 
 union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index da89c2e..06df02d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -31,7 +31,7 @@
 #include "dml/display_mode_structs.h"
 
 struct dchub_init_data;
-struct cstate_pstate_watermarks_st {
+struct cstate_pstate_watermarks_st1 {
 	uint32_t cstate_exit_ns;
 	uint32_t cstate_enter_plus_exit_ns;
 	uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
 struct dcn_watermarks {
 	uint32_t pte_meta_urgent_ns;
 	uint32_t urgent_ns;
-	struct cstate_pstate_watermarks_st cstate_pstate;
+	struct cstate_pstate_watermarks_st1 cstate_pstate;
 };
 
 struct dcn_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 26f29d5..d6a85f4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,8 +32,6 @@
 #include "inc/hw/link_encoder.h"
 #include "core_status.h"
 
-#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
-
 enum pipe_gating_control {
 	PIPE_GATING_CONTROL_DISABLE = 0,
 	PIPE_GATING_CONTROL_ENABLE,
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs {
 	void (*program_gamut_remap)(
 			struct pipe_ctx *pipe_ctx);
 
-	void (*program_csc_matrix)(
-			struct pipe_ctx *pipe_ctx,
-			enum dc_color_space colorspace,
-			uint16_t *matrix);
-
 	void (*program_output_csc)(struct dc *dc,
 			struct pipe_ctx *pipe_ctx,
 			enum dc_color_space colorspace,
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs {
 			struct pipe_ctx *pipe_ctx,
 			bool blank);
 
-	void (*set_bandwidth)(
+	void (*prepare_bandwidth)(
 			struct dc *dc,
-			struct dc_state *context,
-			bool safe_to_lower);
+			struct dc_state *context);
+	void (*optimize_bandwidth)(
+			struct dc *dc,
+			struct dc_state *context);
 
 	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
 			int vmin, int vmax);
@@ -205,16 +200,12 @@ struct hw_sequencer_funcs {
 	void (*log_hw_state)(struct dc *dc,
 		struct dc_log_buffer_ctx *log_ctx);
 	void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+	void (*clear_status_bits)(struct dc *dc, unsigned int mask);
 
 	void (*wait_for_mpcc_disconnect)(struct dc *dc,
 			struct resource_pool *res_pool,
 			struct pipe_ctx *pipe_ctx);
 
-	void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
-	void (*optimize_shared_resources)(struct dc *dc);
-	void (*pplib_apply_display_requirements)(
-			struct dc *dc,
-			struct dc_state *context);
 	void (*edp_power_control)(
 			struct dc_link *link,
 			bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 33b99e3..0086a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -30,9 +30,6 @@
 #include "dal_asic_id.h"
 #include "dm_pp_smu.h"
 
-/* TODO unhardcode, 4 for CZ*/
-#define MEMORY_TYPE_MULTIPLIER 4
-
 enum dce_version resource_parse_asic_id(
 		struct hw_asic_id asic_id);
 
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cdcefd0..479b77c 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space(
 			a1);
 }
 
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+{
+	struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+
+	return translate_from_linear_space(arg,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			dc_fixpt_zero,
+			gamma);
+}
+
 static struct fixed31_32 translate_to_linear_space(
 	struct fixed31_32 arg,
 	struct fixed31_32 a0,
@@ -709,6 +721,175 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
 	}
 }
 
+static void hermite_spline_eetf(struct fixed31_32 input_x,
+				struct fixed31_32 max_display,
+				struct fixed31_32 min_display,
+				struct fixed31_32 max_content,
+				struct fixed31_32 *out_x)
+{
+	struct fixed31_32 min_lum_pq;
+	struct fixed31_32 max_lum_pq;
+	struct fixed31_32 max_content_pq;
+	struct fixed31_32 ks;
+	struct fixed31_32 E1;
+	struct fixed31_32 E2;
+	struct fixed31_32 E3;
+	struct fixed31_32 t;
+	struct fixed31_32 t2;
+	struct fixed31_32 t3;
+	struct fixed31_32 two;
+	struct fixed31_32 three;
+	struct fixed31_32 temp1;
+	struct fixed31_32 temp2;
+	struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
+	struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
+	struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
+
+	if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
+		*out_x = dc_fixpt_zero;
+		return;
+	}
+
+	compute_pq(input_x, &E1);
+	compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
+	compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
+	compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
+	a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
+	ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
+
+	if (dc_fixpt_lt(E1, ks))
+		E2 = E1;
+	else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
+		if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
+			// t = (E1 - ks) / (1 - ks)
+			t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
+					dc_fixpt_sub(dc_fixpt_one, ks));
+		else
+			t = dc_fixpt_zero;
+
+		two = dc_fixpt_from_int(2);
+		three = dc_fixpt_from_int(3);
+
+		t2 = dc_fixpt_mul(t, t);
+		t3 = dc_fixpt_mul(t2, t);
+		temp1 = dc_fixpt_mul(two, t3);
+		temp2 = dc_fixpt_mul(three, t2);
+
+		// (2t^3 - 3t^2 + 1) * ks
+		E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
+				dc_fixpt_sub(temp1, temp2)));
+
+		// (-2t^3 + 3t^2) * max_lum_pq
+		E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
+				dc_fixpt_sub(temp2, temp1)));
+
+		temp1 = dc_fixpt_mul(two, t2);
+		temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
+
+		// (t^3 - 2t^2 + t) * (1-ks)
+		E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+				dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+	} else
+		E2 = dc_fixpt_one;
+
+	temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+	temp2 = dc_fixpt_mul(temp1, temp1);
+	temp2 = dc_fixpt_mul(temp2, temp2);
+	// temp2 = (1-E2)^4
+
+	E3 =  dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
+	compute_de_pq(E3, out_x);
+
+	*out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
+}
+
+static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
+		uint32_t hw_points_num,
+		const struct hw_x_point *coordinate_x,
+		const struct freesync_hdr_tf_params *fs_params)
+{
+	uint32_t i;
+	struct pwl_float_data_ex *rgb = rgb_regamma;
+	const struct hw_x_point *coord_x = coordinate_x;
+	struct fixed31_32 scaledX = dc_fixpt_zero;
+	struct fixed31_32 scaledX1 = dc_fixpt_zero;
+	struct fixed31_32 max_display;
+	struct fixed31_32 min_display;
+	struct fixed31_32 max_content;
+	struct fixed31_32 min_content;
+	struct fixed31_32 clip = dc_fixpt_one;
+	struct fixed31_32 output;
+	bool use_eetf = false;
+	bool is_clipped = false;
+	struct fixed31_32 sdr_white_level;
+
+	if (fs_params == NULL || fs_params->max_content == 0 ||
+			fs_params->max_display == 0)
+		return false;
+
+	max_display = dc_fixpt_from_int(fs_params->max_display);
+	min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+	max_content = dc_fixpt_from_int(fs_params->max_content);
+	min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+	sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
+	if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
+		min_display = dc_fixpt_from_fraction(1, 10);
+	if (fs_params->max_display < 100) // cap at 100 at the top
+		max_display = dc_fixpt_from_int(100);
+
+	if (fs_params->min_content < fs_params->min_display)
+		use_eetf = true;
+	else
+		min_content = min_display;
+
+	if (fs_params->max_content > fs_params->max_display)
+		use_eetf = true;
+	else
+		max_content = max_display;
+
+	rgb += 32; // first 32 points have problems with fixed point, too small
+	coord_x += 32;
+	for (i = 32; i <= hw_points_num; i++) {
+		if (!is_clipped) {
+			if (use_eetf) {
+				/* max content equals 1 */
+				scaledX1 = dc_fixpt_div(coord_x->x,
+						dc_fixpt_div(max_content, sdr_white_level));
+				hermite_spline_eetf(scaledX1, max_display, min_display,
+						max_content, &scaledX);
+			} else
+				scaledX = dc_fixpt_div(coord_x->x,
+						dc_fixpt_div(max_display, sdr_white_level));
+
+			if (dc_fixpt_lt(scaledX, clip)) {
+				if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
+					output = dc_fixpt_zero;
+				else
+					output = calculate_gamma22(scaledX);
+
+				rgb->r = output;
+				rgb->g = output;
+				rgb->b = output;
+			} else {
+				is_clipped = true;
+				rgb->r = clip;
+				rgb->g = clip;
+				rgb->b = clip;
+			}
+		} else {
+			rgb->r = clip;
+			rgb->g = clip;
+			rgb->b = clip;
+		}
+
+		++coord_x;
+		++rgb;
+	}
+
+	return true;
+}
+
 static void build_degamma(struct pwl_float_data_ex *curve,
 		uint32_t hw_points_num,
 		const struct hw_x_point *coordinate_x, bool is_2_4)
@@ -1356,7 +1537,8 @@ static bool map_regamma_hw_to_x_user(
 #define _EXTRA_POINTS 3
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params)
 {
 	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
 	struct dividers dividers;
@@ -1374,7 +1556,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 	/* we can use hardcoded curve for plain SRGB TF */
 	if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
 			output_tf->tf == TRANSFER_FUNCTION_SRGB &&
-			(!mapUserRamp && ramp->type == GAMMA_RGB_256))
+			(ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)))
 		return true;
 
 	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1424,6 +1606,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 				MAX_HW_POINTS,
 				coordinates_x,
 				output_tf->sdr_ref_white_level);
+	} else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
+			fs_params != NULL) {
+		build_freesync_hdr(rgb_regamma,
+				MAX_HW_POINTS,
+				coordinates_x,
+				fs_params);
 	} else {
 		tf_pts->end_exponent = 0;
 		tf_pts->x_point_at_y1_red = 1;
@@ -1573,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 	struct pwl_float_data *rgb_user = NULL;
 	struct pwl_float_data_ex *curve = NULL;
-	struct gamma_pixel *axix_x = NULL;
+	struct gamma_pixel *axis_x = NULL;
 	struct pixel_gamma_point *coeff = NULL;
 	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
 	bool ret = false;
@@ -1599,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 			 GFP_KERNEL);
 	if (!curve)
 		goto curve_alloc_fail;
-	axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+	axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
 			  GFP_KERNEL);
-	if (!axix_x)
-		goto axix_x_alloc_fail;
+	if (!axis_x)
+		goto axis_x_alloc_fail;
 	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
 			 GFP_KERNEL);
 	if (!coeff)
@@ -1615,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 	tf = input_tf->tf;
 
 	build_evenly_distributed_points(
-			axix_x,
+			axis_x,
 			ramp->num_entries,
 			dividers);
 
@@ -1640,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 	tf_pts->x_point_at_y1_blue = 1;
 
 	map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-			coordinates_x, axix_x, curve,
+			coordinates_x, axis_x, curve,
 			MAX_HW_POINTS, tf_pts,
 			mapUserRamp && ramp->type != GAMMA_CUSTOM);
 	if (ramp->type == GAMMA_CUSTOM)
@@ -1650,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 	kvfree(coeff);
 coeff_alloc_fail:
-	kvfree(axix_x);
-axix_x_alloc_fail:
+	kvfree(axis_x);
+axis_x_alloc_fail:
 	kvfree(curve);
 curve_alloc_fail:
 	kvfree(rgb_user);
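
For reference, hermite_spline_eetf() added above evaluates, in 31.32 fixed point, a tone-mapping curve with the BT.2390 EETF shape. In the notation below (ours, not AMD's), E_1 is the PQ-encoded input and L_max, L_min, L_content correspond to max_display, min_display and max_content:

\begin{aligned}
a &= \frac{1+b}{PQ(1)}, \qquad b = \tfrac{1}{2}, \qquad
K_S = a \, PQ\!\left(\tfrac{L_{max}}{L_{content}}\right) - b,\\
t &= \frac{E_1 - K_S}{1 - K_S},\\
E_2 &= \begin{cases}
E_1, & E_1 < K_S,\\
(2t^3 - 3t^2 + 1)\,K_S + (-2t^3 + 3t^2)\,PQ\!\left(\tfrac{L_{max}}{L_{content}}\right) + (t^3 - 2t^2 + t)(1 - K_S), & K_S \le E_1 \le 1,\\
1, & \text{otherwise},
\end{cases}\\
E_3 &= E_2 + PQ\!\left(\tfrac{L_{min}}{L_{content}}\right)(1 - E_2)^4.
\end{aligned}

The function then returns PQ^{-1}(E_3) rescaled by L_content / L_max, matching the final dc_fixpt_div above.
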
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 63ccb9c..a6e164d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -73,12 +73,21 @@ struct regamma_lut {
 	};
 };
 
+struct freesync_hdr_tf_params {
+	unsigned int sdr_white_level;
+	unsigned int min_content; // luminance in 1/10000 nits
+	unsigned int max_content; // luminance in nits
+	unsigned int min_display; // luminance in 1/10000 nits
+	unsigned int max_display; // luminance in nits
+};
+
 void setup_x_points_distribution(void);
 void precompute_pq(void);
 void precompute_de_pq(void);
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
-		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+		const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+		const struct freesync_hdr_tf_params *fs_params);
 
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
 		const struct dc_gamma *ramp, bool mapUserRamp);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4018c71..1544ed3 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
 #define RENDER_TIMES_MAX_COUNT 10
 /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
 #define BTR_EXIT_MARGIN 2000
+/* Threshold to exit fixed refresh rate */
+#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
 /* Number of consecutive frames to check before entering/exiting fixed refresh*/
 #define FIXED_REFRESH_ENTER_FRAME_COUNT 5
 #define FIXED_REFRESH_EXIT_FRAME_COUNT 5
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		if (in_out_vrr->btr.btr_active) {
 			in_out_vrr->btr.frame_counter = 0;
 			in_out_vrr->btr.btr_active = false;
-
-		/* Exit Fixed Refresh mode */
-		} else if (in_out_vrr->fixed.fixed_active) {
-
-			in_out_vrr->fixed.frame_counter++;
-
-			if (in_out_vrr->fixed.frame_counter >
-					FIXED_REFRESH_EXIT_FRAME_COUNT) {
-				in_out_vrr->fixed.frame_counter = 0;
-				in_out_vrr->fixed.fixed_active = false;
-			}
 		}
 	} else if (last_render_time_in_us > max_render_time_in_us) {
 		/* Enter Below the Range */
-		if (!in_out_vrr->btr.btr_active &&
-				in_out_vrr->btr.btr_enabled) {
-			in_out_vrr->btr.btr_active = true;
-
-		/* Enter Fixed Refresh mode */
-		} else if (!in_out_vrr->fixed.fixed_active &&
-				!in_out_vrr->btr.btr_enabled) {
-			in_out_vrr->fixed.frame_counter++;
-
-			if (in_out_vrr->fixed.frame_counter >
-					FIXED_REFRESH_ENTER_FRAME_COUNT) {
-				in_out_vrr->fixed.frame_counter = 0;
-				in_out_vrr->fixed.fixed_active = true;
-			}
-		}
+		in_out_vrr->btr.btr_active = true;
 	}
 
 	/* BTR set to "not active" so disengage */
 	if (!in_out_vrr->btr.btr_active) {
-		in_out_vrr->btr.btr_active = false;
 		in_out_vrr->btr.inserted_duration_in_us = 0;
 		in_out_vrr->btr.frames_to_insert = 0;
 		in_out_vrr->btr.frame_counter = 0;
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
 	bool update = false;
 	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 
-	if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+	// Compute the exit refresh rate and exit frame duration
+	unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+			+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
+	unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
+
+	if (last_render_time_in_us < exit_frame_duration_in_us) {
 		/* Exit Fixed Refresh mode */
 		if (in_out_vrr->fixed.fixed_active) {
 			in_out_vrr->fixed.frame_counter++;
@@ -627,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
 static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
 		struct dc_info_packet *infopacket)
 {
-	if (app_tf != transfer_func_unknown) {
+	if (app_tf != TRANSFER_FUNC_UNKNOWN) {
 		infopacket->valid = true;
 
 		infopacket->sb[6] |= 0x08;  // PB6 = [Bit 3 = Native Color Active]
 
-		if (app_tf == transfer_func_gamma_22) {
+		if (app_tf == TRANSFER_FUNC_GAMMA_22) {
 			infopacket->sb[9] |= 0x04;  // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
 		}
 	}
@@ -707,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
 		return;
 
 	switch (packet_type) {
-	case packet_type_fs2:
+	case PACKET_TYPE_FS2:
 		build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
 		break;
-	case packet_type_vrr:
-	case packet_type_fs1:
+	case PACKET_TYPE_VRR:
+	case PACKET_TYPE_FS1:
 	default:
 		build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
 	}
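
apply_fixed_refresh() now derives its exit threshold from a 4 Hz margin above the minimum refresh rate rather than the old fixed 2000 us BTR margin. Worked through with the patch's integer math (a standalone sketch; the 60 Hz floor is an example value):

#include <stdio.h>

int main(void)
{
	unsigned int max_render_time_in_us = 16667;	/* 60 Hz floor */

	/* 1e9 / us gives millihertz; add the 4 Hz (4000 mHz) margin */
	unsigned int exit_rate_millihz =
		1000000000 / max_render_time_in_us + 1000 * 4;
	unsigned int exit_frame_us = 1000000000 / exit_rate_millihz;

	/* prints 63998 mHz, 15625 us: frames must render faster than
	 * ~15.6 ms (>= ~64 Hz) before fixed refresh may be exited */
	printf("%u mHz, %u us\n", exit_rate_millihz, exit_frame_us);
	return 0;
}
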
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 786b343..5b1c9a4 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,15 +26,13 @@
 #ifndef MOD_INFO_PACKET_H_
 #define MOD_INFO_PACKET_H_
 
-struct info_packet_inputs {
-	const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
 
-struct info_packets {
-	struct dc_info_packet *pVscInfoPacket;
-};
+// Forward declarations
+struct dc_stream_state;
+struct dc_info_packet;
 
-void mod_build_infopackets(struct info_packet_inputs *inputs,
-		struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+		struct dc_info_packet *info_packet);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 238c431..1bd02c0 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -23,27 +23,26 @@
  *
  */
 
-
 #ifndef MOD_SHARED_H_
 #define MOD_SHARED_H_
 
 enum color_transfer_func {
-	transfer_func_unknown,
-	transfer_func_srgb,
-	transfer_func_bt709,
-	transfer_func_pq2084,
-	transfer_func_pq2084_interim,
-	transfer_func_linear_0_1,
-	transfer_func_linear_0_125,
-	transfer_func_dolbyvision,
-	transfer_func_gamma_22,
-	transfer_func_gamma_26
+	TRANSFER_FUNC_UNKNOWN,
+	TRANSFER_FUNC_SRGB,
+	TRANSFER_FUNC_BT709,
+	TRANSFER_FUNC_PQ2084,
+	TRANSFER_FUNC_PQ2084_INTERIM,
+	TRANSFER_FUNC_LINEAR_0_1,
+	TRANSFER_FUNC_LINEAR_0_125,
+	TRANSFER_FUNC_GAMMA_22,
+	TRANSFER_FUNC_GAMMA_26
 };
 
 enum vrr_packet_type {
-	packet_type_vrr,
-	packet_type_fs1,
-	packet_type_fs2
+	PACKET_TYPE_VRR,
+	PACKET_TYPE_FS1,
+	PACKET_TYPE_FS2
 };
 
+
 #endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ff8bfb9..db06fab 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -25,6 +25,10 @@
 
 #include "mod_info_packet.h"
 #include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
 
 enum ColorimetryRGBDP {
 	ColorimetryRGB_DP_sRGB               = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
 	ColorimetryYCC_DP_ITU2020YCbCr  = 7,
 };
 
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet)
 {
 	unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 	 *   DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
 	 *   (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
 	 *   the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
-	 *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+	 *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
 	 */
 	if (vscPacketRevision == 0x5) {
 		/* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 
 }
 
-void mod_build_infopackets(struct info_packet_inputs *inputs,
-		struct info_packets *info_packets)
-{
-	if (info_packets->pVscInfoPacket != NULL)
-		mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index 0000000..87851f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'power' sub-module of DAL.
+#
+
+MOD_POWER = power_helpers.o
+
+AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER))
+#$(info ************  DAL POWER MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
new file mode 100644
index 0000000..00f63b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -0,0 +1,326 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "power_helpers.h"
+#include "dc/inc/hw/dmcu.h"
+
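+/* Note: despite the name, this rounds to the nearest integer (it adds
+ * half of the divisor before dividing), not strictly up.
+ */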
+#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+
+/* Possible Min Reduction config from least aggressive to most aggressive
+ *  0    1     2     3     4     5     6     7     8     9     10    11   12
+ * 100  98.0 94.1  94.1  85.1  80.3  75.3  69.4  60.0  57.6  50.2  49.8  40.0 %
+ */
+static const unsigned char min_reduction_table[13] = {
+0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
+
+/* Possible Max Reduction configs from least aggressive to most aggressive
+ *  0    1     2     3     4     5     6     7     8     9     10    11   12
+ * 96.1 89.8 85.1  80.3  69.4  64.7  64.7  50.2  39.6  30.2  30.2  30.2  19.6 %
+ */
+static const unsigned char max_reduction_table[13] = {
+0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
+
+/* Predefined ABM configuration sets. We may have different configuration sets
+ * in order to satisfy different power/quality requirements.
+ */
+static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
+/*  ABM Level 1,    ABM Level 2,    ABM Level 3,    ABM Level 4 */
+{       2,              5,              7,              8       },	/* Default - Medium aggressiveness */
+{       2,              5,              8,              11      },	/* Alt #1  - Increased aggressiveness */
+{       0,              2,              4,              8       },	/* Alt #2  - Minimal aggressiveness */
+{       3,              6,              10,             12      },	/* Alt #3  - Super aggressiveness */
+};
+
+#define NUM_AMBI_LEVEL    5
+#define NUM_AGGR_LEVEL    4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/* NOTE: iRAM is 256B in size */
+struct iram_table_v_2 {
+	/* flags                      */
+	uint16_t flags;							/* 0x00 U16  */
+
+	/* parameters for ABM2.0 algorithm */
+	uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x02 U0.8 */
+	uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x16 U0.8 */
+	uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];	/* 0x2a U2.6 */
+	uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];	/* 0x3e U2.6 */
+	uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x52 U2.6 */
+	uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL];		/* 0x66 U2.6 */
+	uint8_t iir_curve[NUM_AMBI_LEVEL];				/* 0x7a U0.8 */
+	uint8_t deviation_gain;						/* 0x7f U0.8 */
+
+	/* parameters for crgb conversion */
+	uint16_t crgb_thresh[NUM_POWER_FN_SEGS];			/* 0x80 U3.13 */
+	uint16_t crgb_offset[NUM_POWER_FN_SEGS];			/* 0x90 U1.15 */
+	uint16_t crgb_slope[NUM_POWER_FN_SEGS];				/* 0xa0 U4.12 */
+
+	/* parameters for custom curve */
+	/* thresholds for brightness --> backlight */
+	uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS];		/* 0xb0 U16.0 */
+	/* offsets for brightness --> backlight */
+	uint16_t backlight_offsets[NUM_BL_CURVE_SEGS];			/* 0xd0 U16.0 */
+
+	/* For reading PSR State directly from IRAM */
+	uint8_t psr_state;						/* 0xf0       */
+	uint8_t dmcu_interface_version;					/* 0xf1       */
+	uint8_t dmcu_date_version_year_b0;				/* 0xf2       */
+	uint8_t dmcu_date_version_year_b1;				/* 0xf3       */
+	uint8_t dmcu_date_version_month;				/* 0xf4       */
+	uint8_t dmcu_date_version_day;					/* 0xf5       */
+	uint8_t dmcu_state;						/* 0xf6       */
+
+	uint16_t blRampReduction;					/* 0xf7       */
+	uint16_t blRampStart;						/* 0xf9       */
+	uint8_t dummy5;							/* 0xfb       */
+	uint8_t dummy6;							/* 0xfc       */
+	uint8_t dummy7;							/* 0xfd       */
+	uint8_t dummy8;							/* 0xfe       */
+	uint8_t dummy9;							/* 0xff       */
+};
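+
+/* Layout caveat (annotation, not from the original patch): the offsets
+ * above assume a byte-packed image.  blRampReduction/blRampStart sit at
+ * the odd offsets 0xf7/0xf9, which natural alignment would pad, so a
+ * packed attribute or a build-time sizeof() assert would be needed to
+ * guarantee the table really occupies exactly 256 bytes.
+ */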
+
+static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
+{
+	return (uint16_t)(backlight_8bit * 0x101);
+}
+
+static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
+		struct iram_table_v_2 *table)
+{
+	unsigned int i;
+	unsigned int num_entries = NUM_BL_CURVE_SEGS;
+	unsigned int query_input_8bit;
+	unsigned int query_output_8bit;
+	unsigned int lut_index;
+
+	table->backlight_thresholds[0] = 0;
+	table->backlight_offsets[0] = params.backlight_lut_array[0];
+	table->backlight_thresholds[num_entries-1] = 0xFFFF;
+	table->backlight_offsets[num_entries-1] =
+		params.backlight_lut_array[params.backlight_lut_array_size - 1];
+
+	/* Set up all brightness levels between 0% and 100% exclusive.
+	 * Fills the brightness-to-backlight transform table.  The backlight
+	 * custom curve describes the transform from brightness to backlight.
+	 * It is defined as a set of thresholds and a set of offsets which,
+	 * together, imply interpolation of the custom curve into 16 uniformly
+	 * spanned linear segments.  Each threshold/offset is represented by a
+	 * 16-bit entry in format U4.10.
+	 */
+	for (i = 1; i+1 < num_entries; i++) {
+		query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
+
+		lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+		ASSERT(lut_index < params.backlight_lut_array_size);
+		query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
+
+		table->backlight_thresholds[i] =
+				backlight_8_to_16(query_input_8bit);
+		table->backlight_offsets[i] =
+				backlight_8_to_16(query_output_8bit);
+	}
+}
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+	struct dmcu_iram_parameters params)
+{
+	struct iram_table_v_2 ram_table;
+	unsigned int set = params.set;
+
+	if (dmcu == NULL)
+		return false;
+
+	if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+		return true;
+
+	memset(&ram_table, 0, sizeof(ram_table));
+
+	ram_table.flags = 0x0;
+	ram_table.deviation_gain = 0xb3;
+
+	ram_table.blRampReduction =
+		cpu_to_be16(params.backlight_ramping_reduction);
+	ram_table.blRampStart =
+		cpu_to_be16(params.backlight_ramping_start);
+
+	ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
+	ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
+	ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
+	ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
+	ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
+	ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
+	ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
+	ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
+	ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
+	ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
+
+	ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
+	ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
+	ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
+	ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
+	ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
+	ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
+	ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
+	ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
+	ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
+	ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
+
+	ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
+	ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
+	ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
+	ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
+	ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
+	ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
+	ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
+	ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
+	ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
+	ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
+
+	ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
+	ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
+	ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
+	ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
+	ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
+	ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
+	ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
+	ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
+	ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
+	ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
+
+	ram_table.bright_pos_gain[0][0] = 0x20;
+	ram_table.bright_pos_gain[0][1] = 0x20;
+	ram_table.bright_pos_gain[0][2] = 0x20;
+	ram_table.bright_pos_gain[0][3] = 0x20;
+	ram_table.bright_pos_gain[1][0] = 0x20;
+	ram_table.bright_pos_gain[1][1] = 0x20;
+	ram_table.bright_pos_gain[1][2] = 0x20;
+	ram_table.bright_pos_gain[1][3] = 0x20;
+	ram_table.bright_pos_gain[2][0] = 0x20;
+	ram_table.bright_pos_gain[2][1] = 0x20;
+	ram_table.bright_pos_gain[2][2] = 0x20;
+	ram_table.bright_pos_gain[2][3] = 0x20;
+	ram_table.bright_pos_gain[3][0] = 0x20;
+	ram_table.bright_pos_gain[3][1] = 0x20;
+	ram_table.bright_pos_gain[3][2] = 0x20;
+	ram_table.bright_pos_gain[3][3] = 0x20;
+	ram_table.bright_pos_gain[4][0] = 0x20;
+	ram_table.bright_pos_gain[4][1] = 0x20;
+	ram_table.bright_pos_gain[4][2] = 0x20;
+	ram_table.bright_pos_gain[4][3] = 0x20;
+	ram_table.bright_neg_gain[0][0] = 0x00;
+	ram_table.bright_neg_gain[0][1] = 0x00;
+	ram_table.bright_neg_gain[0][2] = 0x00;
+	ram_table.bright_neg_gain[0][3] = 0x00;
+	ram_table.bright_neg_gain[1][0] = 0x00;
+	ram_table.bright_neg_gain[1][1] = 0x00;
+	ram_table.bright_neg_gain[1][2] = 0x00;
+	ram_table.bright_neg_gain[1][3] = 0x00;
+	ram_table.bright_neg_gain[2][0] = 0x00;
+	ram_table.bright_neg_gain[2][1] = 0x00;
+	ram_table.bright_neg_gain[2][2] = 0x00;
+	ram_table.bright_neg_gain[2][3] = 0x00;
+	ram_table.bright_neg_gain[3][0] = 0x00;
+	ram_table.bright_neg_gain[3][1] = 0x00;
+	ram_table.bright_neg_gain[3][2] = 0x00;
+	ram_table.bright_neg_gain[3][3] = 0x00;
+	ram_table.bright_neg_gain[4][0] = 0x00;
+	ram_table.bright_neg_gain[4][1] = 0x00;
+	ram_table.bright_neg_gain[4][2] = 0x00;
+	ram_table.bright_neg_gain[4][3] = 0x00;
+	ram_table.dark_pos_gain[0][0] = 0x00;
+	ram_table.dark_pos_gain[0][1] = 0x00;
+	ram_table.dark_pos_gain[0][2] = 0x00;
+	ram_table.dark_pos_gain[0][3] = 0x00;
+	ram_table.dark_pos_gain[1][0] = 0x00;
+	ram_table.dark_pos_gain[1][1] = 0x00;
+	ram_table.dark_pos_gain[1][2] = 0x00;
+	ram_table.dark_pos_gain[1][3] = 0x00;
+	ram_table.dark_pos_gain[2][0] = 0x00;
+	ram_table.dark_pos_gain[2][1] = 0x00;
+	ram_table.dark_pos_gain[2][2] = 0x00;
+	ram_table.dark_pos_gain[2][3] = 0x00;
+	ram_table.dark_pos_gain[3][0] = 0x00;
+	ram_table.dark_pos_gain[3][1] = 0x00;
+	ram_table.dark_pos_gain[3][2] = 0x00;
+	ram_table.dark_pos_gain[3][3] = 0x00;
+	ram_table.dark_pos_gain[4][0] = 0x00;
+	ram_table.dark_pos_gain[4][1] = 0x00;
+	ram_table.dark_pos_gain[4][2] = 0x00;
+	ram_table.dark_pos_gain[4][3] = 0x00;
+	ram_table.dark_neg_gain[0][0] = 0x00;
+	ram_table.dark_neg_gain[0][1] = 0x00;
+	ram_table.dark_neg_gain[0][2] = 0x00;
+	ram_table.dark_neg_gain[0][3] = 0x00;
+	ram_table.dark_neg_gain[1][0] = 0x00;
+	ram_table.dark_neg_gain[1][1] = 0x00;
+	ram_table.dark_neg_gain[1][2] = 0x00;
+	ram_table.dark_neg_gain[1][3] = 0x00;
+	ram_table.dark_neg_gain[2][0] = 0x00;
+	ram_table.dark_neg_gain[2][1] = 0x00;
+	ram_table.dark_neg_gain[2][2] = 0x00;
+	ram_table.dark_neg_gain[2][3] = 0x00;
+	ram_table.dark_neg_gain[3][0] = 0x00;
+	ram_table.dark_neg_gain[3][1] = 0x00;
+	ram_table.dark_neg_gain[3][2] = 0x00;
+	ram_table.dark_neg_gain[3][3] = 0x00;
+	ram_table.dark_neg_gain[4][0] = 0x00;
+	ram_table.dark_neg_gain[4][1] = 0x00;
+	ram_table.dark_neg_gain[4][2] = 0x00;
+	ram_table.dark_neg_gain[4][3] = 0x00;
+	ram_table.iir_curve[0] = 0x65;
+	ram_table.iir_curve[1] = 0x65;
+	ram_table.iir_curve[2] = 0x65;
+	ram_table.iir_curve[3] = 0x65;
+	ram_table.iir_curve[4] = 0x65;
+	ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
+	ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
+	ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
+	ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
+	ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
+	ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
+	ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
+	ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
+	ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
+	ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
+	ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
+	ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
+	ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
+	ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
+	ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
+	ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
+	ram_table.crgb_slope[0]  = cpu_to_be16(0x3147);
+	ram_table.crgb_slope[1]  = cpu_to_be16(0x2978);
+	ram_table.crgb_slope[2]  = cpu_to_be16(0x23a2);
+	ram_table.crgb_slope[3]  = cpu_to_be16(0x1f55);
+	ram_table.crgb_slope[4]  = cpu_to_be16(0x1c63);
+	ram_table.crgb_slope[5]  = cpu_to_be16(0x1a0f);
+	ram_table.crgb_slope[6]  = cpu_to_be16(0x178d);
+	ram_table.crgb_slope[7]  = cpu_to_be16(0x15ab);
+
+	fill_backlight_transform_table(
+			params, &ram_table);
+
+	return dmcu->funcs->load_iram(
+			dmcu, 0, (char *)(&ram_table), sizeof(ram_table));
+}
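
A hypothetical caller sketch for the new helper (values illustrative, not
from this patch; a real caller would pass the panel's measured luminance
curve):

	/* Load the default ABM configuration set with a two-point linear
	 * brightness-to-backlight LUT (0 -> 0x0000, 255 -> 0xffff).
	 */
	static bool example_load_abm_defaults(struct dmcu *dmcu)
	{
		unsigned int lut[2] = { 0x0000, 0xffff };
		struct dmcu_iram_parameters params = {
			.backlight_lut_array = lut,
			.backlight_lut_array_size = 2,
			.backlight_ramping_reduction = 0,
			.backlight_ramping_start = 0,
			.set = 0,	/* abm_config[0]: medium aggressiveness */
		};

		return dmcu_load_iram(dmcu, params);
	}
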
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
new file mode 100644
index 0000000..da5df00f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_POWER_POWER_HELPERS_H_
+#define MODULES_POWER_POWER_HELPERS_H_
+
+#include "dc/inc/hw/dmcu.h"
+
+
+enum abm_defines {
+	abm_defines_max_level = 4,
+	abm_defines_max_config = 4,
+};
+
+struct dmcu_iram_parameters {
+	unsigned int *backlight_lut_array;
+	unsigned int backlight_lut_array_size;
+	unsigned int backlight_ramping_reduction;
+	unsigned int backlight_ramping_start;
+	unsigned int set;
+};
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+		struct dmcu_iram_parameters params);
+
+#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 9b9699f..c72cbfe 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -52,6 +52,30 @@ struct atif_sbios_requests {
 	u8 backlight_level;	/* panel backlight level (0-255) */
 } __packed;
 
+struct atif_qbtc_arguments {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u8 requested_display;	/* which display is requested */
+} __packed;
+
+#define ATIF_QBTC_MAX_DATA_POINTS 99
+
+struct atif_qbtc_data_point {
+	u8 luminance;		/* luminance in percent */
+	u8 input_signal;	/* input signal in range 0-255 */
+} __packed;
+
+struct atif_qbtc_output {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 flags;		/* all zeroes */
+	u8 error_code;		/* error code */
+	u8 ac_level;		/* default brightness on AC power */
+	u8 dc_level;		/* default brightness on DC power */
+	u8 min_input_signal;	/* min input signal in range 0-255 */
+	u8 max_input_signal;	/* max input signal in range 0-255 */
+	u8 number_of_points;	/* number of data points */
+	struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
+} __packed;
+
 #define ATIF_NOTIFY_MASK	0x3
 #define ATIF_NOTIFY_NONE	0
 #define ATIF_NOTIFY_81		1
@@ -126,26 +150,18 @@ struct atcs_pref_req_output {
  * DWORD - supported functions bit vector
  */
 /* Notifications mask */
-#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
-#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
 #       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
 #       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
 #       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
-#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
-#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
 #       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
 #       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+#       define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED      (1 << 12)
 /* supported functions vector */
 #       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
 #       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
-#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
-#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
-#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
-#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
-#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
-#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
 #       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
-#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#       define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15)
+#       define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED        (1 << 16)
 #       define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED        (1 << 20)
 #define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
@@ -170,6 +186,10 @@ struct atcs_pref_req_output {
  * n (0xd0-0xd9) is specified in notify command code.
  * bit 2:
  * 1 - lid changes not reported though int10
+ * bit 3:
+ * 1 - system bios controls overclocking
+ * bit 4:
+ * 1 - enable overclocking
  */
 #define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
@@ -177,28 +197,23 @@ struct atcs_pref_req_output {
  * OUTPUT:
  * WORD  - structure size in bytes (includes size field)
  * DWORD - pending sbios requests
- * BYTE  - panel expansion mode
+ * BYTE  - reserved (all zeroes)
  * BYTE  - thermal state: target gfx controller
  * BYTE  - thermal state: state id (0: exit state, non-0: state)
  * BYTE  - forced power state: target gfx controller
- * BYTE  - forced power state: state id
+ * BYTE  - forced power state: state id (0: forced state, non-0: state)
  * BYTE  - system power source
  * BYTE  - panel backlight level (0-255)
+ * BYTE  - GPU package power limit: target gfx controller
+ * DWORD - GPU package power limit: value (24:8 fractional format, Watts)
  */
 /* pending sbios requests */
-#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
-#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
 #       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
 #       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
 #       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
-#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
-#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
 #       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
 #       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
-/* panel expansion mode */
-#       define ATIF_PANEL_EXPANSION_DISABLE                        0
-#       define ATIF_PANEL_EXPANSION_FULL                           1
-#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+#       define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST                (1 << 12)
 /* target gfx controller */
 #       define ATIF_TARGET_GFX_SINGLE                              0
 #       define ATIF_TARGET_GFX_PX_IGPU                             1
@@ -208,76 +223,6 @@ struct atcs_pref_req_output {
 #       define ATIF_POWER_SOURCE_DC                                2
 #       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
 #       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
-#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
-/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * WORD  - selected displays
- * WORD  - connected displays
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * WORD  - selected displays
- */
-#       define ATIF_LCD1                                           (1 << 0)
-#       define ATIF_CRT1                                           (1 << 1)
-#       define ATIF_TV                                             (1 << 2)
-#       define ATIF_DFP1                                           (1 << 3)
-#       define ATIF_CRT2                                           (1 << 4)
-#       define ATIF_LCD2                                           (1 << 5)
-#       define ATIF_DFP2                                           (1 << 7)
-#       define ATIF_CV                                             (1 << 8)
-#       define ATIF_DFP3                                           (1 << 9)
-#       define ATIF_DFP4                                           (1 << 10)
-#       define ATIF_DFP5                                           (1 << 11)
-#       define ATIF_DFP6                                           (1 << 12)
-#define ATIF_FUNCTION_GET_LID_STATE                                0x4
-/* ARG0: ATIF_FUNCTION_GET_LID_STATE
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - lid state (0: open, 1: closed)
- *
- * GET_LID_STATE only works at boot and resume, for general lid
- * status, use the kernel provided status
- */
-#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
-/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - 0
- * BYTE  - TV standard
- */
-#       define ATIF_TV_STD_NTSC                                    0
-#       define ATIF_TV_STD_PAL                                     1
-#       define ATIF_TV_STD_PALM                                    2
-#       define ATIF_TV_STD_PAL60                                   3
-#       define ATIF_TV_STD_NTSCJ                                   4
-#       define ATIF_TV_STD_PALCN                                   5
-#       define ATIF_TV_STD_PALN                                    6
-#       define ATIF_TV_STD_SCART_RGB                               9
-#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
-/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - 0
- * BYTE  - TV standard
- * OUTPUT: none
- */
-#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
-/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - panel expansion mode
- */
-#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
-/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
- * ARG1:
- * WORD  - structure size in bytes (includes size field)
- * BYTE  - panel expansion mode
- * OUTPUT: none
- */
 #define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
 /* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
  * ARG1:
@@ -286,21 +231,43 @@ struct atcs_pref_req_output {
  * BYTE  - current temperature (degrees Celsius)
  * OUTPUT: none
  */
-#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
-/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
- * ARG1: none
+#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS    0x10
+/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - requested display
  * OUTPUT:
- * WORD  - number of gfx devices
- * WORD  - device structure size in bytes (excludes device size field)
- * DWORD - flags         \
- * WORD  - bus number     } repeated structure
- * WORD  - device number /
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - flags (currently all 16 bits are reserved)
+ * BYTE  - error code (on failure, disregard all below fields)
+ * BYTE  - AC level (default brightness in percent when machine has full power)
+ * BYTE  - DC level (default brightness in percent when machine is on battery)
+ * BYTE  - min input signal, in range 0-255, corresponding to 0% backlight
+ * BYTE  - max input signal, in range 0-255, corresponding to 100% backlight
+ * BYTE  - number of reported data points
+ * BYTE  - luminance level in percent  \ repeated structure
+ * BYTE  - input signal in range 0-255 / does not have entries for 0% and 100%
  */
-/* flags */
-#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
-#       define ATIF_XGP_PORT                                       (1 << 1)
-#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
-#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+/* requested display */
+#       define ATIF_QBTC_REQUEST_LCD1                              0
+#       define ATIF_QBTC_REQUEST_CRT1                              1
+#       define ATIF_QBTC_REQUEST_DFP1                              3
+#       define ATIF_QBTC_REQUEST_CRT2                              4
+#       define ATIF_QBTC_REQUEST_LCD2                              5
+#       define ATIF_QBTC_REQUEST_DFP2                              7
+#       define ATIF_QBTC_REQUEST_DFP3                              9
+#       define ATIF_QBTC_REQUEST_DFP4                              10
+#       define ATIF_QBTC_REQUEST_DFP5                              11
+#       define ATIF_QBTC_REQUEST_DFP6                              12
+/* error code */
+#       define ATIF_QBTC_ERROR_CODE_SUCCESS                        0
+#       define ATIF_QBTC_ERROR_CODE_FAILURE                        1
+#       define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED           2
+#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION                 0x11
+/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
 #define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION                 0x15
 /* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
  * ARG1: none
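
The QBTC data feeds naturally into the backlight LUT consumed by
dmcu_iram_parameters; a hedged sketch of that conversion (names and the
luminance scaling are illustrative, not from this patch):

	/* Sketch: scatter the reported points into a 256-entry LUT keyed by
	 * input signal; luminance percent is scaled to U16.  Endpoints for
	 * 0% and 100% are not reported and must come from min_input_signal/
	 * max_input_signal; gap filling between points is omitted here.
	 */
	static void example_fill_lut_from_qbtc(const struct atif_qbtc_output *qbtc,
					       unsigned int lut[256])
	{
		int i;

		for (i = 0; i < qbtc->number_of_points; i++) {
			const struct atif_qbtc_data_point *p = &qbtc->data_points[i];

			lut[p->input_signal] = p->luminance * 0xFFFF / 100;
		}
	}
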
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644
index 0000000..8f51587
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL                                                                          0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX                                                                 0
+#define mmMC_VM_XGMI_LFB_SIZE                                                                          0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX                                                                 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644
index 0000000..0a6b072
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT                                                             0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT                                                             0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK                                                               0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK                                                               0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT                                                               0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK                                                                 0x0000FFFFL
+
+#endif
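
Field extraction with these definitions follows the usual kernel SHIFT/MASK
pattern; for a raw 32-bit read `cntl` of MC_VM_XGMI_LFB_CNTL (the register
access itself is elided):

	u32 pf_lfb_region = (cntl & MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK) >>
			    MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT;
	u32 pf_max_region = (cntl & MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK) >>
			    MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT;
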
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 64ecffd..8154d67 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -188,8 +188,8 @@ struct tile_config {
  */
 #define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
 #define ALLOC_MEM_FLAGS_GTT		(1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
 
 /*
  * Allocation flags attributes/access options.
@@ -205,20 +205,6 @@ struct tile_config {
 /**
  * struct kfd2kgd_calls
  *
- * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
- * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
- *
- * @get_local_mem_info: Retrieves information about GPU local memory
- *
- * @get_gpu_clock_counter: Retrieves GPU clock counter
- *
- * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
- *
- * @alloc_pasid: Allocate a PASID
- * @free_pasid: Free a PASID
- *
  * @program_sh_mem_settings: A function that should initiate the memory
  * properties such as main aperture memory type (cache / non cached) and
  * secondary aperture base address, size and memory type.
@@ -255,64 +241,16 @@ struct tile_config {
  *
  * @get_tile_config: Returns GPU-specific tiling mode information
  *
- * @get_cu_info: Retrieves activated cu info
- *
- * @get_vram_usage: Returns current VRAM usage
- *
- * @create_process_vm: Create a VM address space for a given process and GPU
- *
- * @destroy_process_vm: Destroy a VM
- *
- * @get_process_page_dir: Get physical address of a VM page directory
- *
  * @set_vm_context_page_table_base: Program page table base for a VMID
  *
- * @alloc_memory_of_gpu: Allocate GPUVM memory
- *
- * @free_memory_of_gpu: Free GPUVM memory
- *
- * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
- * space. Allocates and updates page tables and page directories as
- * needed. This function may return before all page table updates have
- * completed. This allows multiple map operations (on multiple GPUs)
- * to happen concurrently. Use sync_memory to synchronize with all
- * pending updates.
- *
- * @unmap_memor_to_gpu: Unmap GPUVM memory from a specific VM address space
- *
- * @sync_memory: Wait for pending page table updates to complete
- *
- * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
- * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
- * The kernel virtual address remains valid until the BO is freed.
- *
- * @restore_process_bos: Restore all BOs that belong to the
- * process. This is intended for restoring memory mappings after a TTM
- * eviction.
- *
  * @invalidate_tlbs: Invalidate TLBs for a specific PASID
  *
  * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
  *
- * @submit_ib: Submits an IB to the engine specified by inserting the
- * IB to the corresponding ring (ring type). The IB is executed with the
- * specified VMID in a user mode context.
- *
- * @get_vm_fault_info: Return information about a recent VM fault on
- * GFXv7 and v8. If multiple VM faults occurred since the last call of
- * this function, it will return information about the first of those
- * faults. On GFXv9 VM fault information is fully contained in the IH
- * packet and this function is not needed.
- *
  * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
- * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
- *
- * @set_compute_idle: Indicates that compute is idle on a device. This
- * can be used to change power profiles depending on compute activity.
- *
  * @get_hive_id: Returns hive id of current device, 0 if XGMI is not enabled
  *
  * This structure contains function pointers to services that the kgd driver
@@ -320,21 +258,6 @@ struct tile_config {
  *
  */
 struct kfd2kgd_calls {
-	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
-					void **mem_obj, uint64_t *gpu_addr,
-					void **cpu_ptr, bool mqd_gfx9);
-
-	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
-
-	void (*get_local_mem_info)(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info);
-	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
-
-	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
-
-	int (*alloc_pasid)(unsigned int bits);
-	void (*free_pasid)(unsigned int pasid);
-
 	/* Register access functions */
 	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
 			uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
@@ -398,49 +321,11 @@ struct kfd2kgd_calls {
 				uint64_t va, uint32_t vmid);
 	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
 
-	void (*get_cu_info)(struct kgd_dev *kgd,
-			struct kfd_cu_info *cu_info);
-	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
-
-	int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
-			void **process_info, struct dma_fence **ef);
-	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
-			unsigned int pasid, void **vm, void **process_info,
-			struct dma_fence **ef);
-	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
-	void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
-	uint64_t (*get_process_page_dir)(void *vm);
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
-	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
-			uint64_t size, void *vm,
-			struct kgd_mem **mem, uint64_t *offset,
-			uint32_t flags);
-	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
-	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void *vm);
-	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void *vm);
-	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
-			void **kptr, uint64_t *size);
-	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
-
 	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
 	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
-
-	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
-			uint32_t vmid, uint64_t gpu_addr,
-			uint32_t *ib_cmd, uint32_t ib_len);
-
-	int (*get_vm_fault_info)(struct kgd_dev *kgd,
-			struct kfd_vm_fault_info *info);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-
-	void (*gpu_recover)(struct kgd_dev *kgd);
-
-	void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
-
 	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
 
 };
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 980e696..1479ea1 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -276,6 +276,10 @@ struct amd_pm_funcs {
 		struct amd_pp_simple_clock_info *clocks);
 	int (*notify_smu_enable_pwe)(void *handle);
 	int (*enable_mgpu_fan_boost)(void *handle);
+	int (*set_active_display_count)(void *handle, uint32_t count);
+	int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+	int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+	int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d6aa1d4..9bc27f4 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 		return;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -720,12 +720,12 @@ static int pp_dpm_force_clock_level(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-		pr_info("force clock level is for dpm manual mode only.\n");
+		pr_debug("force clock level is for dpm manual mode only.\n");
 		return -EINVAL;
 	}
 
@@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 	mutex_lock(&hwmgr->smu_lock);
@@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return snprintf(buf, PAGE_SIZE, "\n");
 	}
 
@@ -894,12 +894,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 		return ret;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return ret;
 	}
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-		pr_info("power profile setting is for manual dpm mode only.\n");
+		pr_debug("power profile setting is for manual dpm mode only.\n");
 		return ret;
 	}
 
@@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
 					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
 
 	if (ret) {
-		pr_info("Error in phm_get_clock_info \n");
+		pr_debug("Error in phm_get_clock_info \n");
 		mutex_unlock(&hwmgr->smu_lock);
 		return -EINVAL;
 	}
@@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 		return 0;
 
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
@@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 
@@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
 	}
 
@@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;;
 	}
 
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 	return 0;
 }
 
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+		pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwmgr->smu_lock);
+	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+		pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwmgr->smu_lock);
+	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+		pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwmgr->smu_lock);
+	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+	struct pp_hwmgr *hwmgr = handle;
+	int ret = 0;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	mutex_lock(&hwmgr->smu_lock);
+	ret = phm_set_active_display_count(hwmgr, count);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return ret;
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
 	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
 	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+	.set_active_display_count = pp_set_active_display_count,
+	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
 };
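
Callers reach the new clock hooks through this function table rather than
calling powerplay internals directly. A hedged caller sketch (pp_funcs and
pp_handle stand for the usual amdgpu dispatch plumbing; clocks are in the
interface's 10 kHz units, as the smu10 conversion below shows):

	/* Ask the SMU for a hard minimum DCEF clock, if the backend supports it */
	if (pp_funcs && pp_funcs->set_hard_min_dcefclk_by_freq)
		pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle,
						       min_dcefclk_in_10khz);
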
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a2a7e0e..1f92a9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -288,8 +288,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 	if (display_config == NULL)
 		return -EINVAL;
 
-	if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
-		hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
 
 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
 		if (display_config->displays[index].controller_id != 0)
@@ -480,3 +480,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
 
 	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
 }
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (!hwmgr->hwmgr_func->set_active_display_count)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index dd18cb7..f95c5f5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
 	return (clock + 99) / 100;
 }
 
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 
 	if (smu10_data->need_min_deep_sleep_dcefclk &&
-	    smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+		smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
 		smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
 	return 0;
 }
 
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+	if (smu10_data->dcf_actual_hard_min_freq &&
+		smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+		smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetHardMinDcefclkByFreq,
+					smu10_data->dcf_actual_hard_min_freq);
+	}
+	return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+	if (smu10_data->f_actual_hard_min_freq &&
+		smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+		smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetHardMinFclkByFreq,
+					smu10_data->f_actual_hard_min_freq);
+	}
+	return 0;
+}
+
 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.get_max_high_clocks = smu10_get_max_high_clocks,
 	.read_sensor = smu10_read_sensor,
 	.set_active_display_count = smu10_set_active_display_count,
-	.set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+	.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
 	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
 	.power_off_asic = smu10_power_off_asic,
 	.asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.display_clock_voltage_request = smu10_display_clock_voltage_request,
 	.powergate_gfx = smu10_gfx_off_control,
 	.powergate_sdma = smu10_powergate_sdma,
+	.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+	.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
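
convert_10k_to_mhz() in the hunk above is a ceiling division: these handlers receive clocks in 10 kHz units, the SMC messages take MHz, and rounding up ensures a requested hard minimum is never lowered by truncation. A standalone check of the arithmetic (userspace, illustrative values):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t convert_10k_to_mhz(uint32_t clock)
	{
		return (clock + 99) / 100;	/* ceil(clock / 100) */
	}

	int main(void)
	{
		assert(convert_10k_to_mhz(30000) == 300);	/* exactly 300 MHz */
		assert(convert_10k_to_mhz(30001) == 301);	/* 300.01 MHz rounds up */
		return 0;
	}
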
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b61a01f..d913904 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 					hwmgr->dyn_state.mvdd_dependency_on_mclk);
 
 		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 MVDD table from dependancy table.",
+				"Failed to retrieve SVI2 MVDD table from dependency table.",
 				return result;);
 	}
 
@@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
 					hwmgr->dyn_state.vddci_dependency_on_mclk);
 		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
+				"Failed to retrieve SVI2 VDDCI table from dependency table.",
 				return result);
 	}
 
@@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 				table_info->vddc_lookup_table);
 
 		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
 	}
 
 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
-		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+		if (hwmgr->is_kicker)
+			switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+		else
+			switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
 		break;
 	case CHIP_VEGAM:
 		switch_limit_us = 30;
@@ -4223,9 +4226,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 	if (tmp & (1 << 23)) {
 		data->mem_latency_high = MEM_LATENCY_HIGH;
 		data->mem_latency_low = MEM_LATENCY_LOW;
+		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+		    (hwmgr->chip_id == CHIP_POLARIS11) ||
+		    (hwmgr->chip_id == CHIP_POLARIS12))
+			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
 	} else {
 		data->mem_latency_high = 330;
 		data->mem_latency_low = 330;
+		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+		    (hwmgr->chip_id == CHIP_POLARIS11) ||
+		    (hwmgr->chip_id == CHIP_POLARIS12))
+			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 5e19f59..d138ddae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 		mutex_lock(&adev->grbm_idx_mutex);
 		value = 0;
 		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 					"Failed to enable DPM DIDT.", goto error);
 		}
 		mutex_unlock(&adev->grbm_idx_mutex);
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
 error:
 	mutex_unlock(&adev->grbm_idx_mutex);
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return result;
 }
 
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to disable DPM DIDT.", goto error);
 		}
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	}
 
 	return 0;
 error:
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return result;
 }
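
The hunks above replace every open-coded adev->gfx.rlc.funcs->enter_safe_mode(adev) / exit_safe_mode(adev) pair with the amdgpu_gfx_rlc_enter/exit_safe_mode() helpers. A minimal sketch of the shape such a wrapper could take, assuming it only centralizes the indirect call; the in-tree helper may carry extra guards:

	/* sketch only, not the in-tree implementation */
	static inline void example_rlc_enter_safe_mode(struct amdgpu_device *adev)
	{
		/* single place to add enable/reentrancy checks later */
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	}
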
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index fef111d..553a203 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
-	}
 	return 0;
 }
 
 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
 		return smum_send_msg_to_smc_with_parameter(
 			hwmgr,
 			PPSMC_MSG_UVDPowerON,
@@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
 	.power_state_set = smu8_set_power_state_tasks,
 	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
 	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+	.update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
 	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
 };
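
With this change the UVD power-off/on paths no longer toggle the NB-DPM pstate implicitly; smu8_nbdpm_pstate_enable_disable is instead published as the update_nbdpm_pstate hook so the caller decides when the pstate changes. A hypothetical call site through the hwmgr table:

	/* hypothetical caller; the hook takes (hwmgr, enable, lock) */
	if (hwmgr->hwmgr_func->update_nbdpm_pstate)
		hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, true, true);
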
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 2d88abf..6f26cb2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
 	if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
 
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b7fce5..2e99ecf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2777,7 +2777,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
 
 	case PP_MCLK:
@@ -2794,7 +2794,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
-				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
+				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
 
 	case PP_PCIE:
@@ -3476,109 +3476,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
 
 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	/* init/fini related */
-	.backend_init =
-		vega20_hwmgr_backend_init,
-	.backend_fini =
-		vega20_hwmgr_backend_fini,
-	.asic_setup =
-		vega20_setup_asic_task,
-	.power_off_asic =
-		vega20_power_off_asic,
-	.dynamic_state_management_enable =
-		vega20_enable_dpm_tasks,
-	.dynamic_state_management_disable =
-		vega20_disable_dpm_tasks,
+	.backend_init = vega20_hwmgr_backend_init,
+	.backend_fini = vega20_hwmgr_backend_fini,
+	.asic_setup = vega20_setup_asic_task,
+	.power_off_asic = vega20_power_off_asic,
+	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
+	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
 	/* power state related */
-	.apply_clocks_adjust_rules =
-		vega20_apply_clocks_adjust_rules,
-	.pre_display_config_changed =
-		vega20_pre_display_configuration_changed_task,
-	.display_config_changed =
-		vega20_display_configuration_changed_task,
+	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
+	.display_config_changed = vega20_display_configuration_changed_task,
 	.check_smc_update_required_for_display_configuration =
 		vega20_check_smc_update_required_for_display_configuration,
 	.notify_smc_display_config_after_ps_adjustment =
 		vega20_notify_smc_display_config_after_ps_adjustment,
 	/* export to DAL */
-	.get_sclk =
-		vega20_dpm_get_sclk,
-	.get_mclk =
-		vega20_dpm_get_mclk,
-	.get_dal_power_level =
-		vega20_get_dal_power_level,
-	.get_clock_by_type_with_latency =
-		vega20_get_clock_by_type_with_latency,
-	.get_clock_by_type_with_voltage =
-		vega20_get_clock_by_type_with_voltage,
-	.set_watermarks_for_clocks_ranges =
-		vega20_set_watermarks_for_clocks_ranges,
-	.display_clock_voltage_request =
-		vega20_display_clock_voltage_request,
-	.get_performance_level =
-		vega20_get_performance_level,
+	.get_sclk = vega20_dpm_get_sclk,
+	.get_mclk = vega20_dpm_get_mclk,
+	.get_dal_power_level = vega20_get_dal_power_level,
+	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
+	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
+	.display_clock_voltage_request = vega20_display_clock_voltage_request,
+	.get_performance_level = vega20_get_performance_level,
 	/* UMD pstate, profile related */
-	.force_dpm_level =
-		vega20_dpm_force_dpm_level,
-	.get_power_profile_mode =
-		vega20_get_power_profile_mode,
-	.set_power_profile_mode =
-		vega20_set_power_profile_mode,
+	.force_dpm_level = vega20_dpm_force_dpm_level,
+	.get_power_profile_mode = vega20_get_power_profile_mode,
+	.set_power_profile_mode = vega20_set_power_profile_mode,
 	/* od related */
-	.set_power_limit =
-		vega20_set_power_limit,
-	.get_sclk_od =
-		vega20_get_sclk_od,
-	.set_sclk_od =
-		vega20_set_sclk_od,
-	.get_mclk_od =
-		vega20_get_mclk_od,
-	.set_mclk_od =
-		vega20_set_mclk_od,
-	.odn_edit_dpm_table =
-		vega20_odn_edit_dpm_table,
+	.set_power_limit = vega20_set_power_limit,
+	.get_sclk_od = vega20_get_sclk_od,
+	.set_sclk_od = vega20_set_sclk_od,
+	.get_mclk_od = vega20_get_mclk_od,
+	.set_mclk_od = vega20_set_mclk_od,
+	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
+	/* for sysfs to retrieve/set gfxclk/memclk */
-	.force_clock_level =
-		vega20_force_clock_level,
-	.print_clock_levels =
-		vega20_print_clock_levels,
-	.read_sensor =
-		vega20_read_sensor,
+	.force_clock_level = vega20_force_clock_level,
+	.print_clock_levels = vega20_print_clock_levels,
+	.read_sensor = vega20_read_sensor,
 	/* powergate related */
-	.powergate_uvd =
-		vega20_power_gate_uvd,
-	.powergate_vce =
-		vega20_power_gate_vce,
+	.powergate_uvd = vega20_power_gate_uvd,
+	.powergate_vce = vega20_power_gate_vce,
 	/* thermal related */
-	.start_thermal_controller =
-		vega20_start_thermal_controller,
-	.stop_thermal_controller =
-		vega20_thermal_stop_thermal_controller,
-	.get_thermal_temperature_range =
-		vega20_get_thermal_temperature_range,
-	.register_irq_handlers =
-		smu9_register_irq_handlers,
-	.disable_smc_firmware_ctf =
-		vega20_thermal_disable_alert,
+	.start_thermal_controller = vega20_start_thermal_controller,
+	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
+	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+	.register_irq_handlers = smu9_register_irq_handlers,
+	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
 	/* fan control related */
-	.get_fan_speed_percent =
-		vega20_fan_ctrl_get_fan_speed_percent,
-	.set_fan_speed_percent =
-		vega20_fan_ctrl_set_fan_speed_percent,
-	.get_fan_speed_info =
-		vega20_fan_ctrl_get_fan_speed_info,
-	.get_fan_speed_rpm =
-		vega20_fan_ctrl_get_fan_speed_rpm,
-	.set_fan_speed_rpm =
-		vega20_fan_ctrl_set_fan_speed_rpm,
-	.get_fan_control_mode =
-		vega20_get_fan_control_mode,
-	.set_fan_control_mode =
-		vega20_set_fan_control_mode,
+	.get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
+	.set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
+	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
+	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
+	.get_fan_control_mode = vega20_get_fan_control_mode,
+	.set_fan_control_mode = vega20_set_fan_control_mode,
 	/* smu memory related */
-	.notify_cac_buffer_info =
-		vega20_notify_cac_buffer_info,
-	.enable_mgpu_fan_boost =
-		vega20_enable_mgpu_fan_boost,
+	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
+	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
 };
 
 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
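
The two print_clock_levels hunks above fix a unit mismatch: the SMU reports the current clock in 10 kHz units while the DPM table stores kHz, so the active-level "*" marker can only match after scaling "now" by 10. A standalone check with illustrative values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t now = 35000;			/* 350 MHz, in 10 kHz units */
		uint32_t clocks_in_khz = 350000;	/* the same level, in kHz */

		assert(clocks_in_khz != now);		/* old compare never matched */
		assert(clocks_in_khz == now * 10);	/* fixed compare does */
		return 0;
	}
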
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 54fd012..f4dab97 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 
 extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
 extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
 #endif /* _HARDWARE_MANAGER_H_ */
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e5a60aa..0d298a0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -28,7 +28,6 @@
 #include "hardwaremanager.h"
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
-#include "hwmgr_ppt.h"
 #include "power_state.h"
 #include "smu_helper.h"
 
@@ -310,7 +309,7 @@ struct pp_hwmgr_func {
 	int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
 	int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
 	int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
-	int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+	int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
 	int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
 	int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
 					uint32_t virtual_addr_low,
@@ -318,6 +317,9 @@ struct pp_hwmgr_func {
 					uint32_t mc_addr_low,
 					uint32_t mc_addr_hi,
 					uint32_t size);
+	int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
+					bool enable,
+					bool lock);
 	int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
 					struct PP_TemperatureRange *range);
 	int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -330,6 +332,8 @@ struct pp_hwmgr_func {
 	int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
 	int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
 	int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+	int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+	int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
 };
 
 struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 65eb630..94bf7b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -40,10 +40,6 @@
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
 
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index c1a99df..6e19f4c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -397,6 +397,9 @@ typedef uint16_t PPSMC_Result;
 
 #define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
 
+#define PPSMC_MSG_EnableFFC                   ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC                  ((uint16_t) 0x308)
+
 #define PPSMC_MSG_EnableDpmDidt               ((uint16_t) 0x309)
 #define PPSMC_MSG_DisableDpmDidt              ((uint16_t) 0x30A)
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a1e0ac9..52abca0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -44,7 +44,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
@@ -1529,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	efuse = efuse >> 24;
 
 	if (hwmgr->chip_id == CHIP_POLARIS10) {
-		min = 1000;
-		max = 2300;
+		if (hwmgr->is_kicker) {
+			min = 1200;
+			max = 2500;
+		} else {
+			min = 1000;
+			max = 2300;
+		}
+	} else if (hwmgr->chip_id == CHIP_POLARIS11) {
+		if (hwmgr->is_kicker) {
+			min = 900;
+			max = 2100;
+		} else {
+			min = 1100;
+			max = 2100;
+		}
 	} else {
 		min = 1100;
 		max = 2100;
@@ -1627,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
 	int result = 0;
@@ -1647,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
 
 	if (0 == result) {
+		if (((adev->pdev->device == 0x67ef) &&
+		     ((adev->pdev->revision == 0xe0) ||
+		      (adev->pdev->revision == 0xe5))) ||
+		    ((adev->pdev->device == 0x67ff) &&
+		     ((adev->pdev->revision == 0xcf) ||
+		      (adev->pdev->revision == 0xef) ||
+		      (adev->pdev->revision == 0xff)))) {
+			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+			if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+			    (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+				if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+				    (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+				    (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+				    (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+				    (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+				    (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF718F1D4;
+					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x323FD;
+					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x1E455;
+					avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+					avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+					avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x23;
+				}
+			}
+		} else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF6B024DD;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x3005E;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x18A5F;
+			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+			avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x3B;
+		} else if (((adev->pdev->device == 0x67df) &&
+			    ((adev->pdev->revision == 0xe0) ||
+			     (adev->pdev->revision == 0xe3) ||
+			     (adev->pdev->revision == 0xe4) ||
+			     (adev->pdev->revision == 0xe5) ||
+			     (adev->pdev->revision == 0xe7) ||
+			     (adev->pdev->revision == 0xef))) ||
+			   ((adev->pdev->device == 0x6fdf) &&
+			    ((adev->pdev->revision == 0xef) ||
+			     (adev->pdev->revision == 0xff)))) {
+			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF843B66B;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x59CB5;
+			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0xFFFF287F;
+			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+			avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x58;
+		}
+	}
+
+	if (0 == result) {
 		table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
 		table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
 		table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
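
The AVFS block added above keys off PCI device/revision pairs before overriding the CKS-off fuse coefficients. A condensed predicate for the first group, with the IDs copied from the hunk (the helper name is illustrative, not part of the driver):

	static bool is_avfs_cks_off_part(struct amdgpu_device *adev)
	{
		return (adev->pdev->device == 0x67ef &&
			(adev->pdev->revision == 0xe0 ||
			 adev->pdev->revision == 0xe5)) ||
		       (adev->pdev->device == 0x67ff &&
			(adev->pdev->revision == 0xcf ||
			 adev->pdev->revision == 0xef ||
			 adev->pdev->revision == 0xff));
	}
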
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d0eb8ab..d111dd4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -29,7 +29,6 @@
 #include "rv_ppsmc.h"
 #include "smu10_driver_if.h"
 #include "smu10.h"
-#include "ppatomctrl.h"
 #include "pp_debug.h"
 
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 09b844e..e2787e1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
 					mmSMU_MP1_SRBM2P_ARG_0);
 }
 
-static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+					    uint16_t msg, uint32_t parameter)
 {
 	int result = 0;
+	ktime_t t_start;
+	s64 elapsed_us;
 
 	if (hwmgr == NULL || hwmgr->device == NULL)
 		return -EINVAL;
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
 		/* Read the last message to SMU, to report actual cause */
 		uint32_t val = cgs_read_register(hwmgr->device,
 						 mmSMU_MP1_SRBM2P_MSG_0);
-		pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
-		pr_err("SMU still servicing msg (0x%04x)\n", val);
+		pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
+			__func__, msg, val);
 		return result;
 	}
+	t_start = ktime_get();
+
+	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
 
 	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
 
-	return 0;
+	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+
+	elapsed_us = ktime_us_delta(ktime_get(), t_start);
+
+	WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
+			__func__, msg, parameter, elapsed_us);
+
+	return result;
 }
 
-/* Send a message to the SMC, and wait for its response.*/
 static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	int result = 0;
-
-	result = smu8_send_msg_to_smc_async(hwmgr, msg);
-	if (result != 0)
-		return result;
-
-	return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
-					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
 }
 
 static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-					  uint16_t msg, uint32_t parameter)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
-	return smu8_send_msg_to_smc(hwmgr, msg);
-}
-
 static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
 				   uint32_t firmware)
 {
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
 
 	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
 	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+	pr_info("smu version %02d.%02d.%02d\n",
+		((hwmgr->smu_version >> 16) & 0xFF),
+		((hwmgr->smu_version >> 8) & 0xFF),
+		(hwmgr->smu_version & 0xFF));
 	adev->pm.fw_version = hwmgr->smu_version >> 8;
 
 	return smu8_request_smu_load_fw(hwmgr);
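
The smu8 rework folds the old async send and the response wait into a single function and times the round trip with ktime so the WARN on timeout can report how long the SMU was polled. The measurement pattern in isolation (poll_fn is a stand-in for the PHM_WAIT_FIELD_UNEQUAL poll):

	#include <linux/ktime.h>

	static int timed_poll(struct pp_hwmgr *hwmgr,
			      int (*poll_fn)(struct pp_hwmgr *))
	{
		ktime_t t_start = ktime_get();
		int result = poll_fn(hwmgr);
		s64 elapsed_us = ktime_us_delta(ktime_get(), t_start);

		WARN(result, "poll timed out after %lld us\n", elapsed_us);
		return result;
	}
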
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 9f71512..1e69300 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -40,7 +40,6 @@
 
 #include "smu7_hwmgr.h"
 #include "hardwaremanager.h"
-#include "ppatomctrl.h"
 #include "atombios.h"
 #include "pppcielanes.h"
 
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index e8fcf3a..90ef76b 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -20,7 +20,6 @@
 struct arcpgu_drm_private {
 	void __iomem		*regs;
 	struct clk		*clk;
-	struct drm_fbdev_cma	*fbdev;
 	struct drm_framebuffer	*fb;
 	struct drm_crtc		crtc;
 	struct drm_plane	*plane;
@@ -43,8 +42,5 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
 int arc_pgu_setup_crtc(struct drm_device *dev);
 int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
 int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
-struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count);
 
 #endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 965cda4..62f51f7 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -158,8 +158,6 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
 
 static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
 	.mode_valid	= arc_pgu_crtc_mode_valid,
-	.mode_set	= drm_helper_crtc_mode_set,
-	.mode_set_base	= drm_helper_crtc_mode_set_base,
 	.mode_set_nofb	= arc_pgu_crtc_mode_set_nofb,
 	.atomic_begin	= arc_pgu_crtc_atomic_begin,
 	.atomic_enable	= arc_pgu_crtc_atomic_enable,
@@ -186,7 +184,6 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
 
 static void arc_pgu_plane_destroy(struct drm_plane *plane)
 {
-	drm_plane_helper_disable(plane, NULL);
 	drm_plane_cleanup(plane);
 }
 
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f067de4..206a76a 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_atomic_helper.h>
@@ -25,16 +26,8 @@
 #include "arcpgu.h"
 #include "arcpgu_regs.h"
 
-static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
-{
-	struct arcpgu_drm_private *arcpgu = dev->dev_private;
-
-	drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
 static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
 	.fb_create  = drm_gem_fb_create,
-	.output_poll_changed = arcpgu_fb_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -51,13 +44,6 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
 
 DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
 
-static void arcpgu_lastclose(struct drm_device *drm)
-{
-	struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
-	drm_fbdev_cma_restore_mode(arcpgu->fbdev);
-}
-
 static int arcpgu_load(struct drm_device *drm)
 {
 	struct platform_device *pdev = to_platform_device(drm->dev);
@@ -113,27 +99,14 @@ static int arcpgu_load(struct drm_device *drm)
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
 
-	arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
-					   drm->mode_config.num_connector);
-	if (IS_ERR(arcpgu->fbdev)) {
-		ret = PTR_ERR(arcpgu->fbdev);
-		arcpgu->fbdev = NULL;
-		return -ENODEV;
-	}
-
 	platform_set_drvdata(pdev, drm);
 	return 0;
 }
 
 static int arcpgu_unload(struct drm_device *drm)
 {
-	struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
-	if (arcpgu->fbdev) {
-		drm_fbdev_cma_fini(arcpgu->fbdev);
-		arcpgu->fbdev = NULL;
-	}
 	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
 
 	return 0;
@@ -167,7 +140,6 @@ static int arcpgu_debugfs_init(struct drm_minor *minor)
 static struct drm_driver arcpgu_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.lastclose = arcpgu_lastclose,
 	.name = "arcpgu",
 	.desc = "ARC PGU Controller",
 	.date = "20160219",
@@ -210,13 +182,15 @@ static int arcpgu_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unload;
 
+	drm_fbdev_generic_setup(drm, 16);
+
 	return 0;
 
 err_unload:
 	arcpgu_unload(drm);
 
 err_unref:
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return ret;
 }
@@ -227,7 +201,7 @@ static int arcpgu_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(drm);
 	arcpgu_unload(drm);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return 0;
 }
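
After this conversion, fbdev emulation for arcpgu comes from the DRM core instead of driver-managed drm_fbdev_cma state, and the probe flow reduces to alloc, load, register, then drm_fbdev_generic_setup(). Condensed from the driver above for reference:

	static int example_probe(struct platform_device *pdev)
	{
		struct drm_device *drm;
		int ret;

		drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		ret = arcpgu_load(drm);
		if (ret)
			goto err_put;

		ret = drm_dev_register(drm, 0);
		if (ret)
			goto err_unload;

		/* generic fbdev attaches after registration */
		drm_fbdev_generic_setup(drm, 16);
		return 0;

	err_unload:
		arcpgu_unload(drm);
	err_put:
		drm_dev_put(drm);
		return ret;
	}
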
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 7aad7dd..b9bed11 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -77,12 +77,18 @@ static const struct malidp_format_id malidp500_de_formats[] = {
 	{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) },	\
 	{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) },	\
 	{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) },	\
-	{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+	{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
+	{ DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6) }
 
 static const struct malidp_format_id malidp550_de_formats[] = {
 	MALIDP_COMMON_FORMATS,
 };
 
+static const struct malidp_format_id malidp650_de_formats[] = {
+	MALIDP_COMMON_FORMATS,
+	{ DRM_FORMAT_X0L0, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 4) },
+};
+
 static const struct malidp_layer malidp500_layers[] = {
 	/* id, base address, fb pointer address base, stride offset,
 	 *	yuv2rgb matrix offset, mmu control register offset, rotation_features
@@ -630,6 +636,8 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
 	case DRM_FORMAT_BGR565:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_X0L0:
+	case DRM_FORMAT_X0L2:
 		bytes_per_col = 32;
 		break;
 	/* 16 lines at 1.5 bytes per pixel */
@@ -905,8 +913,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 					    MALIDP550_DC_IRQ_SE,
 				.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
 			},
-			.pixel_formats = malidp550_de_formats,
-			.n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
+			.pixel_formats = malidp650_de_formats,
+			.n_pixel_formats = ARRAY_SIZE(malidp650_de_formats),
 			.bus_align_bytes = 16,
 		},
 		.query_hw = malidp650_query_hw,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 837a24d..c9a6d3e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -398,6 +398,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 	struct drm_framebuffer *fb;
 	u16 pixel_alpha = state->pixel_blend_mode;
 	int i, ret;
+	unsigned int block_w, block_h;
 
 	if (!state->crtc || !state->fb)
 		return 0;
@@ -413,13 +414,26 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 	ms->n_planes = fb->format->num_planes;
 	for (i = 0; i < ms->n_planes; i++) {
 		u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
-		if (fb->pitches[i] & (alignment - 1)) {
+
+		if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+				& (alignment - 1)) {
 			DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
 				      fb->pitches[i], i);
 			return -EINVAL;
 		}
 	}
 
+	block_w = drm_format_info_block_width(fb->format, 0);
+	block_h = drm_format_info_block_height(fb->format, 0);
+	if (fb->width % block_w || fb->height % block_h) {
+		DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
+		return -EINVAL;
+	}
+	if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
+		DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
+		return -EINVAL;
+	}
+
 	if ((state->crtc_w > mp->hwdev->max_line_size) ||
 	    (state->crtc_h > mp->hwdev->max_line_size) ||
 	    (state->crtc_w < mp->hwdev->min_line_size) ||
@@ -492,10 +506,18 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
 		num_strides = (mp->hwdev->hw->features &
 			       MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
 
-	for (i = 0; i < num_strides; ++i)
-		malidp_hw_write(mp->hwdev, pitches[i],
+	/*
+	 * The drm convention for pitch is that it needs to cover width * cpp,
+	 * but our hardware wants the pitch/stride to cover all rows included
+	 * in a tile.
+	 */
+	for (i = 0; i < num_strides; ++i) {
+		unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);
+
+		malidp_hw_write(mp->hwdev, pitches[i] * block_h,
 				mp->layer->base +
 				mp->layer->stride_offset + i * 4);
+	}
 }
 
 static const s16
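
The pitch check above multiplies the DRM pitch by the format's tile height before testing alignment, and "& (alignment - 1)" is the usual power-of-two remainder test. A standalone illustration with made-up numbers (not taken from the hardware):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t pitch = 1000, block_h = 2, alignment = 16;

		/* the raw pitch is not 16-byte aligned: 1000 % 16 == 8 */
		assert((pitch & (alignment - 1)) != 0);
		/* the per-tile-row stride is: 2000 % 16 == 0 */
		assert(((pitch * block_h) & (alignment - 1)) == 0);
		return 0;
	}
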
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e6c4cd3..bfc6504 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -104,8 +104,6 @@ struct ast_private {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index fe354eb..c168d62 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct ast_private, ttm.bdev);
 }
 
-static int
-ast_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-ast_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int ast_ttm_global_init(struct ast_private *ast)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &ast->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &ast_ttm_mem_global_init;
-	global_ref->release = &ast_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	ast->ttm.bo_global_ref.mem_glob =
-		ast->ttm.mem_global_ref.object;
-	global_ref = &ast->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&ast->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-ast_ttm_global_release(struct ast_private *ast)
-{
-	if (ast->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&ast->ttm.mem_global_ref);
-	ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct ast_bo *bo;
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
 	struct drm_device *dev = ast->dev;
 	struct ttm_bo_device *bdev = &ast->ttm.bdev;
 
-	ret = ast_ttm_global_init(ast);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&ast->ttm.bdev,
-				 ast->ttm.bo_global_ref.ref.object,
 				 &ast_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
 
 	ttm_bo_device_release(&ast->ttm.bdev);
 
-	ast_ttm_global_release(ast);
-
 	arch_phys_wc_del(ast->fb_mtrr);
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
 				pci_resource_len(dev->pdev, 0));
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9e34bce..96f4082 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -364,9 +364,7 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
 
 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
 	.mode_valid = atmel_hlcdc_crtc_mode_valid,
-	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.atomic_check = atmel_hlcdc_crtc_atomic_check,
 	.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
 	.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 843cac2..034a911 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -556,7 +556,6 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = atmel_hlcdc_fb_create,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = atmel_hlcdc_dc_atomic_commit,
 };
@@ -658,8 +657,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
 	platform_set_drvdata(pdev, dev);
 
-	drm_fb_cma_fbdev_init(dev, 24, 0);
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
@@ -678,7 +675,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
 
-	drm_fb_cma_fbdev_fini(dev);
 	flush_workqueue(dc->wq);
 	drm_kms_helper_poll_fini(dev);
 	drm_atomic_helper_shutdown(dev);
@@ -727,7 +723,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
 			   DRIVER_MODESET | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.lastclose = drm_fb_helper_lastclose,
 	.irq_handler = atmel_hlcdc_dc_irq_handler,
 	.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
 	.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
@@ -763,19 +758,21 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
 
 	ret = atmel_hlcdc_dc_load(ddev);
 	if (ret)
-		goto err_unref;
+		goto err_put;
 
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
 		goto err_unload;
 
+	drm_fbdev_generic_setup(ddev, 24);
+
 	return 0;
 
 err_unload:
 	atmel_hlcdc_dc_unload(ddev);
 
-err_unref:
-	drm_dev_unref(ddev);
+err_put:
+	drm_dev_put(ddev);
 
 	return ret;
 }
@@ -786,7 +783,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(ddev);
 	atmel_hlcdc_dc_unload(ddev);
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index e7a6907..fb38c8b 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -66,6 +66,7 @@ struct bochs_device {
 	u16 yres_virtual;
 	u32 stride;
 	u32 bpp;
+	struct edid *edid;
 
 	/* drm */
 	struct drm_device  *dev;
@@ -76,8 +77,6 @@ struct bochs_device {
 
 	/* ttm */
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		bool initialized;
 	} ttm;
@@ -126,6 +125,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
 		      const struct drm_format_info *format);
 void bochs_hw_setbase(struct bochs_device *bochs,
 		      int x, int y, u64 addr);
+int bochs_hw_load_edid(struct bochs_device *bochs);
 
 /* bochs_mm.c */
 int bochs_mm_init(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index cacff73..c90a0d4 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -69,6 +69,35 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs)
 #define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
 #endif
 
+static int bochs_get_edid_block(void *data, u8 *buf,
+				unsigned int block, size_t len)
+{
+	struct bochs_device *bochs = data;
+	size_t i, start = block * EDID_LENGTH;
+
+	if (start + len > 0x400 /* vga register offset */)
+		return -1;
+
+	for (i = 0; i < len; i++)
+		buf[i] = readb(bochs->mmio + start + i);
+	return 0;
+}
+
+int bochs_hw_load_edid(struct bochs_device *bochs)
+{
+	if (!bochs->mmio)
+		return -1;
+
+	kfree(bochs->edid);
+	bochs->edid = drm_do_get_edid(&bochs->connector,
+				      bochs_get_edid_block, bochs);
+	if (bochs->edid == NULL)
+		return -1;
+
+	return 0;
+}
+
 int bochs_hw_init(struct drm_device *dev)
 {
 	struct bochs_device *bochs = dev->dev_private;
@@ -164,6 +193,7 @@ void bochs_hw_fini(struct drm_device *dev)
 	if (bochs->fb_map)
 		iounmap(bochs->fb_map);
 	pci_release_regions(dev->pdev);
+	kfree(bochs->edid);
 }
 
 void bochs_hw_setmode(struct bochs_device *bochs,
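
bochs_get_edid_block() above implements the drm_do_get_edid() callback contract: the core asks for len bytes of EDID block number "block", and the callback translates that into a byte offset of block * EDID_LENGTH within the device's EDID window. The same contract over a plain buffer (hypothetical data source; bounds assumed validated by the caller):

	#include <linux/string.h>
	#include <drm/drm_edid.h>	/* EDID_LENGTH */

	static int example_get_edid_block(void *data, u8 *buf,
					  unsigned int block, size_t len)
	{
		const u8 *edid_window = data;

		memcpy(buf, edid_window + block * EDID_LENGTH, len);
		return 0;
	}
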
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9bc5b43..f87c284 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,10 +213,17 @@ static void bochs_encoder_init(struct drm_device *dev)
 
 static int bochs_connector_get_modes(struct drm_connector *connector)
 {
-	int count;
+	struct bochs_device *bochs =
+		container_of(connector, struct bochs_device, connector);
+	int count = 0;
 
-	count = drm_add_modes_noedid(connector, 8192, 8192);
-	drm_set_preferred_mode(connector, defx, defy);
+	if (bochs->edid)
+		count = drm_add_edid_modes(connector, bochs->edid);
+
+	if (!count) {
+		count = drm_add_modes_noedid(connector, 8192, 8192);
+		drm_set_preferred_mode(connector, defx, defy);
+	}
 	return count;
 }
 
@@ -271,6 +278,13 @@ static void bochs_connector_init(struct drm_device *dev)
 	drm_connector_helper_add(connector,
 				 &bochs_connector_connector_helper_funcs);
 	drm_connector_register(connector);
+
+	bochs_hw_load_edid(bochs);
+	if (bochs->edid) {
+		DRM_INFO("Found EDID data blob.\n");
+		drm_connector_attach_edid_property(connector);
+		drm_connector_update_edid_property(connector, bochs->edid);
+	}
 }
 
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index a61c1ec..0980411 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct bochs_device, ttm.bdev);
 }
 
-static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int bochs_ttm_global_init(struct bochs_device *bochs)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &bochs->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &bochs_ttm_mem_global_init;
-	global_ref->release = &bochs_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	bochs->ttm.bo_global_ref.mem_glob =
-		bochs->ttm.mem_global_ref.object;
-	global_ref = &bochs->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&bochs->ttm.mem_global_ref);
-		return r;
-	}
-
-	return 0;
-}
-
-static void bochs_ttm_global_release(struct bochs_device *bochs)
-{
-	if (bochs->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&bochs->ttm.mem_global_ref);
-	bochs->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct bochs_bo *bo;
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
 	struct ttm_bo_device *bdev = &bochs->ttm.bdev;
 	int ret;
 
-	ret = bochs_ttm_global_init(bochs);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&bochs->ttm.bdev,
-				 bochs->ttm.bo_global_ref.ref.object,
 				 &bochs_bo_driver,
 				 bochs->dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
 		return;
 
 	ttm_bo_device_release(&bochs->ttm.bdev);
-	bochs_ttm_global_release(bochs);
 	bochs->ttm.initialized = false;
 }
 
@@ -414,7 +353,7 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
 		return ret;
 
 	ret = drm_gem_handle_create(file, gobj, &handle);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (ret)
 		return ret;
 
@@ -454,6 +393,6 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 	bo = gem_to_bochs_bo(obj);
 	*offset = bochs_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return 0;
 }
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 9eeb8ef..2fee47b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -95,6 +95,7 @@
 	depends on OF
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
+	select I2C_MUX
 	---help---
 	  Silicon Image sii902x bridge chip driver.
 
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2f21d3b..753e961 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1219,12 +1219,12 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
 	 * plat_data->attach return, that's why we record the connector
 	 * pointer after plat attached.
 	 */
-	 if (dp->plat_data->attach) {
-		 ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
-		 if (ret) {
-			 DRM_ERROR("Failed at platform attch func\n");
-			 return ret;
-		 }
+	if (dp->plat_data->attach) {
+		ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+		if (ret) {
+			DRM_ERROR("Failed at platform attach func\n");
+			return ret;
+		}
 	}
 
 	if (dp->plat_data->panel) {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a135..bfa9020 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2018 Renesas Electronics
+ *
  * Copyright (C) 2016 Atmel
  *		      Bo Shen <voice.shen@atmel.com>
  *
@@ -21,6 +23,7 @@
  */
 
 #include <linux/gpio/consumer.h>
+#include <linux/i2c-mux.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/regmap.h>
@@ -86,8 +89,49 @@ struct sii902x {
 	struct drm_bridge bridge;
 	struct drm_connector connector;
 	struct gpio_desc *reset_gpio;
+	struct i2c_mux_core *i2cmux;
 };
 
+static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
+{
+	union i2c_smbus_data data;
+	int ret;
+
+	ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+			       I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data);
+
+	if (ret < 0)
+		return ret;
+
+	*val = data.byte;
+	return 0;
+}
+
+static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val)
+{
+	union i2c_smbus_data data;
+
+	data.byte = val;
+
+	return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+				I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
+				&data);
+}
+
+static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask,
+					u8 val)
+{
+	int ret;
+	u8 status;
+
+	ret = sii902x_read_unlocked(i2c, reg, &status);
+	if (ret)
+		return ret;
+	status &= ~mask;
+	status |= val & mask;
+	return sii902x_write_unlocked(i2c, reg, status);
+}
+
 static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
 {
 	return container_of(bridge, struct sii902x, bridge);
@@ -135,41 +179,11 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
 static int sii902x_get_modes(struct drm_connector *connector)
 {
 	struct sii902x *sii902x = connector_to_sii902x(connector);
-	struct regmap *regmap = sii902x->regmap;
 	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
-	struct device *dev = &sii902x->i2c->dev;
-	unsigned long timeout;
-	unsigned int retries;
-	unsigned int status;
 	struct edid *edid;
-	int num = 0;
-	int ret;
+	int num = 0, ret;
 
-	ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
-				 SII902X_SYS_CTRL_DDC_BUS_REQ,
-				 SII902X_SYS_CTRL_DDC_BUS_REQ);
-	if (ret)
-		return ret;
-
-	timeout = jiffies +
-		  msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
-	do {
-		ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
-		if (ret)
-			return ret;
-	} while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
-		 time_before(jiffies, timeout));
-
-	if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
-		dev_err(dev, "failed to acquire the i2c bus\n");
-		return -ETIMEDOUT;
-	}
-
-	ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
-	if (ret)
-		return ret;
-
-	edid = drm_get_edid(connector, sii902x->i2c->adapter);
+	edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
 	drm_connector_update_edid_property(connector, edid);
 	if (edid) {
 		num = drm_add_edid_modes(connector, edid);
@@ -181,42 +195,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
 	if (ret)
 		return ret;
 
-	/*
-	 * Sometimes the I2C bus can stall after failure to use the
-	 * EDID channel. Retry a few times to see if things clear
-	 * up, else continue anyway.
-	 */
-	retries = 5;
-	do {
-		ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
-				  &status);
-		retries--;
-	} while (ret && retries);
-	if (ret)
-		dev_err(dev, "failed to read status (%d)\n", ret);
-
-	ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
-				 SII902X_SYS_CTRL_DDC_BUS_REQ |
-				 SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
-	if (ret)
-		return ret;
-
-	timeout = jiffies +
-		  msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
-	do {
-		ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
-		if (ret)
-			return ret;
-	} while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
-			   SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
-		 time_before(jiffies, timeout));
-
-	if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
-		      SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
-		dev_err(dev, "failed to release the i2c bus\n");
-		return -ETIMEDOUT;
-	}
-
 	return num;
 }
 
@@ -366,6 +344,121 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+/*
+ * The purpose of sii902x_i2c_bypass_select is to enable the pass-through
+ * mode of the HDMI transmitter. Do not use regmap from within this function;
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here; keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+	struct sii902x *sii902x = i2c_mux_priv(mux);
+	struct device *dev = &sii902x->i2c->dev;
+	unsigned long timeout;
+	u8 status;
+	int ret;
+
+	ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+					   SII902X_SYS_CTRL_DDC_BUS_REQ,
+					   SII902X_SYS_CTRL_DDC_BUS_REQ);
+	if (ret)
+		return ret;
+
+	timeout = jiffies +
+		  msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+	do {
+		ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+					    &status);
+		if (ret)
+			return ret;
+	} while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+		 time_before(jiffies, timeout));
+
+	if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+		dev_err(dev, "Failed to acquire the i2c bus\n");
+		return -ETIMEDOUT;
+	}
+
+	return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+				      status);
+}
+
+/*
+ * The purpose of sii902x_i2c_bypass_deselect is to disable the pass-through
+ * mode of the HDMI transmitter. Do not use regmap from within this function;
+ * only use sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here; keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+	struct sii902x *sii902x = i2c_mux_priv(mux);
+	struct device *dev = &sii902x->i2c->dev;
+	unsigned long timeout;
+	unsigned int retries;
+	u8 status;
+	int ret;
+
+	/*
+	 * When the HDMI transmitter is in pass through mode, we need an
+	 * (undocumented) additional delay between STOP and START conditions
+	 * to guarantee the bus won't get stuck.
+	 */
+	udelay(30);
+
+	/*
+	 * Sometimes the I2C bus can stall after failure to use the
+	 * EDID channel. Retry a few times to see if things clear
+	 * up before giving up and returning the error.
+	 */
+	retries = 5;
+	do {
+		ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+					    &status);
+		retries--;
+	} while (ret && retries);
+	if (ret) {
+		dev_err(dev, "failed to read status (%d)\n", ret);
+		return ret;
+	}
+
+	ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+					   SII902X_SYS_CTRL_DDC_BUS_REQ |
+					   SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+	if (ret)
+		return ret;
+
+	timeout = jiffies +
+		  msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+	do {
+		ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+					    &status);
+		if (ret)
+			return ret;
+	} while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+			   SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+		 time_before(jiffies, timeout));
+
+	if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+		      SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+		dev_err(dev, "failed to release the i2c bus\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
 static int sii902x_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
@@ -375,6 +468,13 @@ static int sii902x_probe(struct i2c_client *client,
 	u8 chipid[4];
 	int ret;
 
+	ret = i2c_check_functionality(client->adapter,
+				      I2C_FUNC_SMBUS_BYTE_DATA);
+	if (!ret) {
+		dev_err(dev, "I2C adapter not suitable\n");
+		return -EIO;
+	}
+
 	sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
 	if (!sii902x)
 		return -ENOMEM;
@@ -433,7 +533,15 @@ static int sii902x_probe(struct i2c_client *client,
 
 	i2c_set_clientdata(client, sii902x);
 
-	return 0;
+	sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
+					1, 0, I2C_MUX_GATE,
+					sii902x_i2c_bypass_select,
+					sii902x_i2c_bypass_deselect);
+	if (!sii902x->i2cmux)
+		return -ENOMEM;
+
+	sii902x->i2cmux->priv = sii902x;
+	return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
 }
 
 static int sii902x_remove(struct i2c_client *client)
@@ -441,6 +549,7 @@ static int sii902x_remove(struct i2c_client *client)
 {
 	struct sii902x *sii902x = i2c_get_clientdata(client);
 
+	i2c_mux_del_adapters(sii902x->i2cmux);
 	drm_bridge_remove(&sii902x->bridge);
 
 	return 0;
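
The sii902x change models the transmitter's DDC pass-through as a single-channel gated i2c mux: the select/deselect callbacks request and release the DDC window, and EDID reads then simply go through sii902x->i2cmux->adapter[0]. Because transfers on the child adapter run with the parent adapter locked, the callbacks must use the unlocked __i2c_smbus_xfer() helpers rather than regmap. The same I2C_MUX_GATE registration pattern in isolation, as a rough sketch with "foo_" placeholder names:

#include <linux/i2c.h>
#include <linux/i2c-mux.h>

static int foo_gate_select(struct i2c_mux_core *mux, u32 chan)
{
	/* open the hardware pass-through; only unlocked xfers in here */
	return 0;
}

static int foo_gate_deselect(struct i2c_mux_core *mux, u32 chan)
{
	/* close the pass-through window again */
	return 0;
}

static int foo_register_gate(struct i2c_client *client, void *priv)
{
	struct i2c_mux_core *mux;

	/* one child adapter, no extra private allocation, gated mux */
	mux = i2c_mux_alloc(client->adapter, &client->dev, 1, 0,
			    I2C_MUX_GATE,
			    foo_gate_select, foo_gate_deselect);
	if (!mux)
		return -ENOMEM;

	mux->priv = priv;
	/* the child is reachable as mux->adapter[0] after this call */
	return i2c_mux_add_adapter(mux, 0, 0, 0);
}
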
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 5971976..64c3cf0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1664,6 +1664,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 	case 0x131a:
 	case 0x132a:
 	case 0x201a:
+	case 0x212a:
 		count = 1;
 		break;
 	default:
@@ -1957,7 +1958,6 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
 
 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
 	.get_modes = dw_hdmi_connector_get_modes,
-	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2205,7 +2205,9 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
 	unsigned int i;
 	u8 phy_type;
 
-	phy_type = hdmi_readb(hdmi, HDMI_CONFIG2_ID);
+	phy_type = hdmi->plat_data->phy_force_vendor ?
+				DW_HDMI_PHY_VENDOR_PHY :
+				hdmi_readb(hdmi, HDMI_CONFIG2_ID);
 
 	if (phy_type == DW_HDMI_PHY_VENDOR_PHY) {
 		/* Vendor PHYs require support from the glue layer. */
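
The new phy_force_vendor flag lets glue layers whose HDMI_CONFIG2_ID readout cannot be trusted force the vendor-PHY path unconditionally. A hedged sketch of what such a glue's platform data could look like; the "foo_" names are illustrative and a real glue must fill in the phy_ops hooks:

#include <drm/bridge/dw_hdmi.h>

static const struct dw_hdmi_phy_ops foo_hdmi_phy_ops = {
	/* .init, .disable, .read_hpd, ... supplied by the glue layer */
};

static const struct dw_hdmi_plat_data foo_hdmi_plat_data = {
	.phy_force_vendor = true,	/* skip the HDMI_CONFIG2_ID readout */
	.phy_ops = &foo_hdmi_phy_ops,
	.phy_name = "foo-vendor-phy",
};
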
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index fd79996..2f4b145 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -230,10 +230,21 @@ struct dw_mipi_dsi {
 	u32 format;
 	unsigned long mode_flags;
 
+	struct dw_mipi_dsi *master; /* dual-dsi master ptr */
+	struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
+
 	const struct dw_mipi_dsi_plat_data *plat_data;
 };
 
 /*
+ * Check if either a link to a master or slave is present
+ */
+static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
+{
+	return dsi->slave || dsi->master;
+}
+
+/*
  * The controller should generate 2 frames before
  * preparing the peripheral.
  */
@@ -270,6 +281,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
 				   struct mipi_dsi_device *device)
 {
 	struct dw_mipi_dsi *dsi = host_to_dsi(host);
+	const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
 	struct drm_bridge *bridge;
 	struct drm_panel *panel;
 	int ret;
@@ -300,6 +312,12 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
 
 	drm_bridge_add(&dsi->bridge);
 
+	if (pdata->host_ops && pdata->host_ops->attach) {
+		ret = pdata->host_ops->attach(pdata->priv_data, device);
+		if (ret < 0)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -307,6 +325,14 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
 				   struct mipi_dsi_device *device)
 {
 	struct dw_mipi_dsi *dsi = host_to_dsi(host);
+	const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
+	int ret;
+
+	if (pdata->host_ops && pdata->host_ops->detach) {
+		ret = pdata->host_ops->detach(pdata->priv_data, device);
+		if (ret < 0)
+			return ret;
+	}
 
 	drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
 
@@ -441,10 +467,17 @@ static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
 	}
 
 	dw_mipi_message_config(dsi, msg);
+	if (dsi->slave)
+		dw_mipi_message_config(dsi->slave, msg);
 
 	ret = dw_mipi_dsi_write(dsi, &packet);
 	if (ret)
 		return ret;
+	if (dsi->slave) {
+		ret = dw_mipi_dsi_write(dsi->slave, &packet);
+		if (ret)
+			return ret;
+	}
 
 	if (msg->rx_buf && msg->rx_len) {
 		ret = dw_mipi_dsi_read(dsi, msg);
@@ -583,7 +616,11 @@ static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
 	 * DSI_VNPCR.NPSIZE... especially because this driver supports
 	 * non-burst video modes, see dw_mipi_dsi_video_mode_config()...
 	 */
-	dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
+
+	dsi_write(dsi, DSI_VID_PKT_SIZE,
+		       dw_mipi_is_dual_mode(dsi) ?
+				VID_PKT_SIZE(mode->hdisplay / 2) :
+				VID_PKT_SIZE(mode->hdisplay));
 }
 
 static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
@@ -755,24 +792,43 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
 	 */
 	dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
 
+	if (dsi->slave) {
+		dw_mipi_dsi_disable(dsi->slave);
+		clk_disable_unprepare(dsi->slave->pclk);
+		pm_runtime_put(dsi->slave->dev);
+	}
 	dw_mipi_dsi_disable(dsi);
+
 	clk_disable_unprepare(dsi->pclk);
 	pm_runtime_put(dsi->dev);
 }
 
-static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
+static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
 {
-	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+	/* this instance is the slave, so add the master's lanes */
+	if (dsi->master)
+		return dsi->master->lanes + dsi->lanes;
+
+	/* this instance is the master, so add the slave's lanes */
+	if (dsi->slave)
+		return dsi->lanes + dsi->slave->lanes;
+
+	/* single-dsi, so no other instance to consider */
+	return dsi->lanes;
+}
+
+static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
+				struct drm_display_mode *adjusted_mode)
+{
 	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
 	void *priv_data = dsi->plat_data->priv_data;
 	int ret;
+	u32 lanes = dw_mipi_dsi_get_lanes(dsi);
 
 	clk_prepare_enable(dsi->pclk);
 
 	ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
-				     dsi->lanes, dsi->format, &dsi->lane_mbps);
+				     lanes, dsi->format, &dsi->lane_mbps);
 	if (ret)
 		DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
 
@@ -804,12 +860,25 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 	dw_mipi_dsi_set_mode(dsi, 0);
 }
 
+static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+	dw_mipi_dsi_mode_set(dsi, adjusted_mode);
+	if (dsi->slave)
+		dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
+}
+
 static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
 	/* Switch to video mode for panel-bridge enable & panel enable */
 	dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
+	if (dsi->slave)
+		dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
 }
 
 static enum drm_mode_status
@@ -941,9 +1010,25 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
 
 static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
 {
+	mipi_dsi_host_unregister(&dsi->dsi_host);
+
 	pm_runtime_disable(dsi->dev);
 }
 
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
+{
+	/* introduce controllers to each other */
+	dsi->slave = slave;
+	dsi->slave->master = dsi;
+
+	/* migrate settings for already attached displays */
+	dsi->slave->lanes = dsi->lanes;
+	dsi->slave->channel = dsi->channel;
+	dsi->slave->format = dsi->format;
+	dsi->slave->mode_flags = dsi->mode_flags;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_set_slave);
+
 /*
  * Probe/remove API, used from platforms based on the DRM bridge API.
  */
@@ -957,8 +1042,6 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
 
 void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
 {
-	mipi_dsi_host_unregister(&dsi->dsi_host);
-
 	__dw_mipi_dsi_remove(dsi);
 }
 EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
@@ -966,31 +1049,22 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
 /*
  * Bind/unbind API, used from platforms based on the component framework.
  */
-struct dw_mipi_dsi *
-dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
-		 const struct dw_mipi_dsi_plat_data *plat_data)
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
 {
-	struct dw_mipi_dsi *dsi;
 	int ret;
 
-	dsi = __dw_mipi_dsi_probe(pdev, plat_data);
-	if (IS_ERR(dsi))
-		return dsi;
-
 	ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
 	if (ret) {
-		dw_mipi_dsi_remove(dsi);
 		DRM_ERROR("Failed to initialize bridge with drm\n");
-		return ERR_PTR(ret);
+		return ret;
 	}
 
-	return dsi;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
 
 void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi)
 {
-	__dw_mipi_dsi_remove(dsi);
 }
 EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
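
With the bind/unbind split above, probe/remove now own the host registration, and dual-DSI glue is expected to probe both controllers and pair them with dw_mipi_dsi_set_slave(); from then on the bridge mirrors configuration writes to the slave, sums the lane counts for the PHY, and halves VID_PKT_SIZE per link. A rough sketch of the expected glue flow, with "foo_" placeholder names:

#include <drm/bridge/dw_mipi_dsi.h>

static int foo_bind_dual_dsi(struct platform_device *master_pdev,
			     struct platform_device *slave_pdev,
			     const struct dw_mipi_dsi_plat_data *pdata,
			     struct drm_encoder *encoder)
{
	struct dw_mipi_dsi *dsi, *slave;

	dsi = dw_mipi_dsi_probe(master_pdev, pdata);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	slave = dw_mipi_dsi_probe(slave_pdev, pdata);
	if (IS_ERR(slave)) {
		dw_mipi_dsi_remove(dsi);
		return PTR_ERR(slave);
	}

	/* lanes/channel/format/mode_flags migrate to the slave here */
	dw_mipi_dsi_set_slave(dsi, slave);

	/* bind now only attaches the bridge to the encoder */
	return dw_mipi_dsi_bind(dsi, encoder);
}
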
 
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index ee6b98e..afd4910 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -379,7 +379,7 @@ static void tc358764_detach(struct drm_bridge *bridge)
 	drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
 	drm_panel_detach(ctx->panel);
 	ctx->panel = NULL;
-	drm_connector_unreference(&ctx->connector);
+	drm_connector_put(&ctx->connector);
 }
 
 static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index a29f87e..f2b2e0d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -136,8 +136,6 @@ struct cirrus_device {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 	bool mm_inited;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 68ab182..4dd499c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -169,7 +169,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 	struct drm_mode_fb_cmd2 mode_cmd;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
-	struct cirrus_bo *bo = NULL;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -185,8 +184,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 		return ret;
 	}
 
-	bo = gem_to_cirrus_bo(gobj);
-
 	sysram = vmalloc(size);
 	if (!sysram)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f219532..e075810 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct cirrus_device, ttm.bdev);
 }
 
-static int
-cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &cirrus->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &cirrus_ttm_mem_global_init;
-	global_ref->release = &cirrus_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	cirrus->ttm.bo_global_ref.mem_glob =
-		cirrus->ttm.mem_global_ref.object;
-	global_ref = &cirrus->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-cirrus_ttm_global_release(struct cirrus_device *cirrus)
-{
-	if (cirrus->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&cirrus->ttm.mem_global_ref);
-	cirrus->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct cirrus_bo *bo;
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 	struct drm_device *dev = cirrus->dev;
 	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
 
-	ret = cirrus_ttm_global_init(cirrus);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
-				 cirrus->ttm.bo_global_ref.ref.object,
 				 &cirrus_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
 	ttm_bo_device_release(&cirrus->ttm.bdev);
 
-	cirrus_ttm_global_release(cirrus);
-
 	arch_phys_wc_del(cirrus->fb_mtrr);
 	cirrus->fb_mtrr = 0;
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbdd..48ec378 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -315,9 +315,11 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 }
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
-static int drm_atomic_crtc_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
+				 const struct drm_crtc_state *new_crtc_state)
 {
+	struct drm_crtc *crtc = new_crtc_state->crtc;
+
 	/* NOTE: we explicitly don't enforce constraints such as primary
 	 * layer covering entire screen, since that is something we want
 	 * to allow (on hw that supports it).  For hw that does not, it
@@ -326,7 +328,7 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 	 * TODO: Add generic modeset state checks once we support those.
 	 */
 
-	if (state->active && !state->enable) {
+	if (new_crtc_state->active && !new_crtc_state->enable) {
 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
 				 crtc->base.id, crtc->name);
 		return -EINVAL;
@@ -336,14 +338,14 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 	 * as this is a kernel-internal detail that userspace should never
 	 * be able to trigger. */
 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
-	    WARN_ON(state->enable && !state->mode_blob)) {
+	    WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
 				 crtc->base.id, crtc->name);
 		return -EINVAL;
 	}
 
 	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
-	    WARN_ON(!state->enable && state->mode_blob)) {
+	    WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
 				 crtc->base.id, crtc->name);
 		return -EINVAL;
@@ -359,7 +361,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 	 * and legacy page_flip IOCTL which also reject service on a disabled
 	 * pipe.
 	 */
-	if (state->event && !state->active && !crtc->state->active) {
+	if (new_crtc_state->event &&
+	    !new_crtc_state->active && !old_crtc_state->active) {
 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
 				 crtc->base.id, crtc->name);
 		return -EINVAL;
@@ -395,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
 {
 	struct drm_crtc_state *crtc_state;
 	struct drm_writeback_job *writeback_job = state->writeback_job;
+	const struct drm_display_info *info = &connector->display_info;
+
+	state->max_bpc = info->bpc ? info->bpc : 8;
+	if (connector->max_bpc_property)
+		state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
 
 	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
 		return 0;
@@ -489,14 +497,13 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_plane_state);
 
 static bool
-plane_switching_crtc(struct drm_atomic_state *state,
-		     struct drm_plane *plane,
-		     struct drm_plane_state *plane_state)
+plane_switching_crtc(const struct drm_plane_state *old_plane_state,
+		     const struct drm_plane_state *new_plane_state)
 {
-	if (!plane->state->crtc || !plane_state->crtc)
+	if (!old_plane_state->crtc || !new_plane_state->crtc)
 		return false;
 
-	if (plane->state->crtc == plane_state->crtc)
+	if (old_plane_state->crtc == new_plane_state->crtc)
 		return false;
 
 	/* This could be refined, but currently there's no helper or driver code
@@ -509,88 +516,117 @@ plane_switching_crtc(struct drm_atomic_state *state,
 
 /**
  * drm_atomic_plane_check - check plane state
- * @plane: plane to check
- * @state: plane state to check
+ * @old_plane_state: old plane state to check
+ * @new_plane_state: new plane state to check
  *
  * Provides core sanity checks for plane state.
  *
  * RETURNS:
  * Zero on success, error code on failure
  */
-static int drm_atomic_plane_check(struct drm_plane *plane,
-		struct drm_plane_state *state)
+static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
+				  const struct drm_plane_state *new_plane_state)
 {
+	struct drm_plane *plane = new_plane_state->plane;
+	struct drm_crtc *crtc = new_plane_state->crtc;
+	const struct drm_framebuffer *fb = new_plane_state->fb;
 	unsigned int fb_width, fb_height;
+	struct drm_mode_rect *clips;
+	uint32_t num_clips;
 	int ret;
 
 	/* either *both* CRTC and FB must be set, or neither */
-	if (state->crtc && !state->fb) {
+	if (crtc && !fb) {
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
 				 plane->base.id, plane->name);
 		return -EINVAL;
-	} else if (state->fb && !state->crtc) {
+	} else if (fb && !crtc) {
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
 				 plane->base.id, plane->name);
 		return -EINVAL;
 	}
 
 	/* if disabled, we don't care about the rest of the state: */
-	if (!state->crtc)
+	if (!crtc)
 		return 0;
 
 	/* Check whether this plane is usable on this CRTC */
-	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
 		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
-				 state->crtc->base.id, state->crtc->name,
+				 crtc->base.id, crtc->name,
 				 plane->base.id, plane->name);
 		return -EINVAL;
 	}
 
 	/* Check whether this plane supports the fb pixel format. */
-	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
-					   state->fb->modifier);
+	ret = drm_plane_check_pixel_format(plane, fb->format->format,
+					   fb->modifier);
 	if (ret) {
 		struct drm_format_name_buf format_name;
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
 				 plane->base.id, plane->name,
-				 drm_get_format_name(state->fb->format->format,
+				 drm_get_format_name(fb->format->format,
 						     &format_name),
-				 state->fb->modifier);
+				 fb->modifier);
 		return ret;
 	}
 
 	/* Give drivers some help against integer overflows */
-	if (state->crtc_w > INT_MAX ||
-	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
-	    state->crtc_h > INT_MAX ||
-	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+	if (new_plane_state->crtc_w > INT_MAX ||
+	    new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
+	    new_plane_state->crtc_h > INT_MAX ||
+	    new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
 				 plane->base.id, plane->name,
-				 state->crtc_w, state->crtc_h,
-				 state->crtc_x, state->crtc_y);
+				 new_plane_state->crtc_w, new_plane_state->crtc_h,
+				 new_plane_state->crtc_x, new_plane_state->crtc_y);
 		return -ERANGE;
 	}
 
-	fb_width = state->fb->width << 16;
-	fb_height = state->fb->height << 16;
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
 
 	/* Make sure source coordinates are inside the fb. */
-	if (state->src_w > fb_width ||
-	    state->src_x > fb_width - state->src_w ||
-	    state->src_h > fb_height ||
-	    state->src_y > fb_height - state->src_h) {
+	if (new_plane_state->src_w > fb_width ||
+	    new_plane_state->src_x > fb_width - new_plane_state->src_w ||
+	    new_plane_state->src_h > fb_height ||
+	    new_plane_state->src_y > fb_height - new_plane_state->src_h) {
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
 				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
 				 plane->base.id, plane->name,
-				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
-				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
-				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
-				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
-				 state->fb->width, state->fb->height);
+				 new_plane_state->src_w >> 16,
+				 ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
+				 new_plane_state->src_h >> 16,
+				 ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
+				 new_plane_state->src_x >> 16,
+				 ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
+				 new_plane_state->src_y >> 16,
+				 ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
+				 fb->width, fb->height);
 		return -ENOSPC;
 	}
 
-	if (plane_switching_crtc(state->state, plane, state)) {
+	clips = drm_plane_get_damage_clips(new_plane_state);
+	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+
+	/* Make sure damage clips are valid and inside the fb. */
+	while (num_clips > 0) {
+		if (clips->x1 >= clips->x2 ||
+		    clips->y1 >= clips->y2 ||
+		    clips->x1 < 0 ||
+		    clips->y1 < 0 ||
+		    clips->x2 > fb_width ||
+		    clips->y2 > fb_height) {
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+					 plane->base.id, plane->name, clips->x1,
+					 clips->y1, clips->x2, clips->y2);
+			return -EINVAL;
+		}
+		clips++;
+		num_clips--;
+	}
+
+	if (plane_switching_crtc(old_plane_state, new_plane_state)) {
 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
 				 plane->base.id, plane->name);
 		return -EINVAL;
@@ -927,6 +963,8 @@ int
 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 			       struct drm_crtc *crtc)
 {
+	const struct drm_crtc_state *old_crtc_state =
+		drm_atomic_get_old_crtc_state(state, crtc);
 	struct drm_plane *plane;
 
 	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
@@ -934,7 +972,7 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
 			 crtc->base.id, crtc->name, state);
 
-	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
 		struct drm_plane_state *plane_state =
 			drm_atomic_get_plane_state(state, plane);
 
@@ -961,17 +999,19 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	struct drm_device *dev = state->dev;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
+	struct drm_plane_state *old_plane_state;
+	struct drm_plane_state *new_plane_state;
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *new_crtc_state;
 	struct drm_connector *conn;
 	struct drm_connector_state *conn_state;
 	int i, ret = 0;
 
 	DRM_DEBUG_ATOMIC("checking %p\n", state);
 
-	for_each_new_plane_in_state(state, plane, plane_state, i) {
-		ret = drm_atomic_plane_check(plane, plane_state);
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+		ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
 		if (ret) {
 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
 					 plane->base.id, plane->name);
@@ -979,8 +1019,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 		}
 	}
 
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-		ret = drm_atomic_crtc_check(crtc, crtc_state);
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
 		if (ret) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
 					 crtc->base.id, crtc->name);
@@ -1008,8 +1048,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	}
 
 	if (!state->allow_modeset) {
-		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
 						 crtc->base.id, crtc->name);
 				return -EINVAL;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d8b526b..54e2ae6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_writeback.h>
+#include <drm/drm_damage_helper.h>
 #include <linux/dma-fence.h>
 
 #include "drm_crtc_helper_internal.h"
@@ -92,6 +93,17 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
 	}
 }
 
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+static struct drm_encoder *
+pick_single_encoder_for_connector(struct drm_connector *connector)
+{
+	WARN_ON(connector->encoder_ids[1]);
+	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
 static int handle_conflicting_encoders(struct drm_atomic_state *state,
 				       bool disable_conflicting_encoders)
 {
@@ -119,7 +131,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
 		else if (funcs->best_encoder)
 			new_encoder = funcs->best_encoder(connector);
 		else
-			new_encoder = drm_atomic_helper_best_encoder(connector);
+			new_encoder = pick_single_encoder_for_connector(connector);
 
 		if (new_encoder) {
 			if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -336,7 +348,7 @@ update_connector_routing(struct drm_atomic_state *state,
 	else if (funcs->best_encoder)
 		new_encoder = funcs->best_encoder(connector);
 	else
-		new_encoder = drm_atomic_helper_best_encoder(connector);
+		new_encoder = pick_single_encoder_for_connector(connector);
 
 	if (!new_encoder) {
 		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -658,6 +670,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 			if (old_connector_state->link_status !=
 			    new_connector_state->link_status)
 				new_crtc_state->connectors_changed = true;
+
+			if (old_connector_state->max_requested_bpc !=
+			    new_connector_state->max_requested_bpc)
+				new_crtc_state->connectors_changed = true;
 		}
 
 		if (funcs->atomic_check)
@@ -847,6 +863,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 
 		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
 
+		drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
 		if (!funcs || !funcs->atomic_check)
 			continue;
 
@@ -1445,6 +1463,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
 			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
 				  crtc->base.id, crtc->name);
 	}
+
+	if (old_state->fake_commit)
+		complete_all(&old_state->fake_commit->flip_done);
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
 
@@ -2202,8 +2223,10 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
 		spin_unlock(&crtc->commit_lock);
 	}
 
-	if (old_state->fake_commit)
+	if (old_state->fake_commit) {
 		complete_all(&old_state->fake_commit->cleanup_done);
+		WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
 
@@ -3108,27 +3131,104 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, 0);
-	while (1) {
-		ret = drm_modeset_lock_all_ctx(dev, &ctx);
-		if (!ret)
-			ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
-		if (ret != -EDEADLK)
-			break;
-
-		drm_modeset_backoff(&ctx);
-	}
-
+	ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
 	if (ret)
 		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
 
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 }
 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
 
 /**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector *conn;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	int err = 0;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto free;
+		}
+	}
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			err = PTR_ERR(plane_state);
+			goto free;
+		}
+	}
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(conn, &conn_iter) {
+		struct drm_connector_state *conn_state;
+
+		conn_state = drm_atomic_get_connector_state(state, conn);
+		if (IS_ERR(conn_state)) {
+			err = PTR_ERR(conn_state);
+			drm_connector_list_iter_end(&conn_iter);
+			goto free;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	/* clear the acquire context so that it isn't accidentally reused */
+	state->acquire_ctx = NULL;
+
+free:
+	if (err < 0) {
+		drm_atomic_state_put(state);
+		state = ERR_PTR(err);
+	}
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
+/**
  * drm_atomic_helper_suspend - subsystem-level suspend helper
  * @dev: DRM device
  *
@@ -3159,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
 	struct drm_atomic_state *state;
 	int err;
 
-	drm_modeset_acquire_init(&ctx, 0);
+	/* This can never be returned, but it makes the compiler happy */
+	state = ERR_PTR(-EINVAL);
 
-retry:
-	err = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (err < 0) {
-		state = ERR_PTR(err);
-		goto unlock;
-	}
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
 
 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
 	if (IS_ERR(state))
@@ -3180,13 +3276,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
 	}
 
 unlock:
-	if (PTR_ERR(state) == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
+	DRM_MODESET_LOCK_ALL_END(ctx, err);
+	if (err)
+		return ERR_PTR(err);
 
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_suspend);
@@ -3209,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 					      struct drm_modeset_acquire_ctx *ctx)
 {
-	int i;
+	int i, ret;
 	struct drm_plane *plane;
 	struct drm_plane_state *new_plane_state;
 	struct drm_connector *connector;
@@ -3228,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
 		state->connectors[i].old_state = connector->state;
 
-	return drm_atomic_commit(state);
+	ret = drm_atomic_commit(state);
+
+	state->acquire_ctx = NULL;
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
 
@@ -3256,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
 	drm_mode_config_reset(dev);
 
-	drm_modeset_acquire_init(&ctx, 0);
-	while (1) {
-		err = drm_modeset_lock_all_ctx(dev, &ctx);
-		if (err)
-			goto out;
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
 
-		err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
-out:
-		if (err != -EDEADLK)
-			break;
+	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
 
-		drm_modeset_backoff(&ctx);
-	}
-
+	DRM_MODESET_LOCK_ALL_END(ctx, err);
 	drm_atomic_state_put(state);
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
 
 	return err;
 }
@@ -3413,504 +3499,6 @@ int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
 
 /**
- * drm_atomic_helper_best_encoder - Helper for
- * 	&drm_connector_helper_funcs.best_encoder callback
- * @connector: Connector control structure
- *
- * This is a &drm_connector_helper_funcs.best_encoder callback helper for
- * connectors that support exactly 1 encoder, statically determined at driver
- * init time.
- */
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector)
-{
-	WARN_ON(connector->encoder_ids[1]);
-	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
-
-/**
- * DOC: atomic state reset and initialization
- *
- * Both the drm core and the atomic helpers assume that there is always the full
- * and correct atomic software state for all connectors, CRTCs and planes
- * available. Which is a bit a problem on driver load and also after system
- * suspend. One way to solve this is to have a hardware state read-out
- * infrastructure which reconstructs the full software state (e.g. the i915
- * driver).
- *
- * The simpler solution is to just reset the software state to everything off,
- * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
- * the atomic helpers provide default reset implementations for all hooks.
- *
- * On the upside the precise state tracking of atomic simplifies system suspend
- * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
- * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
- * For other drivers the building blocks are split out, see the documentation
- * for these functions.
- */
-
-/**
- * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
- * @crtc: drm CRTC
- *
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
-{
-	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
-
-	kfree(crtc->state);
-	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
-
-	if (crtc->state)
-		crtc->state->crtc = crtc;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
-
-/**
- * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
- * @crtc: CRTC object
- * @state: atomic CRTC state
- *
- * Copies atomic state from a CRTC's current state and resets inferred values.
- * This is useful for drivers that subclass the CRTC state.
- */
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
-					      struct drm_crtc_state *state)
-{
-	memcpy(state, crtc->state, sizeof(*state));
-
-	if (state->mode_blob)
-		drm_property_blob_get(state->mode_blob);
-	if (state->degamma_lut)
-		drm_property_blob_get(state->degamma_lut);
-	if (state->ctm)
-		drm_property_blob_get(state->ctm);
-	if (state->gamma_lut)
-		drm_property_blob_get(state->gamma_lut);
-	state->mode_changed = false;
-	state->active_changed = false;
-	state->planes_changed = false;
-	state->connectors_changed = false;
-	state->color_mgmt_changed = false;
-	state->zpos_changed = false;
-	state->commit = NULL;
-	state->event = NULL;
-	state->pageflip_flags = 0;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
- * @crtc: drm CRTC
- *
- * Default CRTC state duplicate hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
-{
-	struct drm_crtc_state *state;
-
-	if (WARN_ON(!crtc->state))
-		return NULL;
-
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		__drm_atomic_helper_crtc_duplicate_state(crtc, state);
-
-	return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * __drm_atomic_helper_crtc_destroy_state - release CRTC state
- * @state: CRTC state object to release
- *
- * Releases all resources stored in the CRTC state without actually freeing
- * the memory of the CRTC state. This is useful for drivers that subclass the
- * CRTC state.
- */
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
-{
-	if (state->commit) {
-		/*
-		 * In the event that a non-blocking commit returns
-		 * -ERESTARTSYS before the commit_tail work is queued, we will
-		 * have an extra reference to the commit object. Release it, if
-		 * the event has not been consumed by the worker.
-		 *
-		 * state->event may be freed, so we can't directly look at
-		 * state->event->base.completion.
-		 */
-		if (state->event && state->commit->abort_completion)
-			drm_crtc_commit_put(state->commit);
-
-		kfree(state->commit->event);
-		state->commit->event = NULL;
-
-		drm_crtc_commit_put(state->commit);
-	}
-
-	drm_property_blob_put(state->mode_blob);
-	drm_property_blob_put(state->degamma_lut);
-	drm_property_blob_put(state->ctm);
-	drm_property_blob_put(state->gamma_lut);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
-
-/**
- * drm_atomic_helper_crtc_destroy_state - default state destroy hook
- * @crtc: drm CRTC
- * @state: CRTC state object to release
- *
- * Default CRTC state destroy hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
-					  struct drm_crtc_state *state)
-{
-	__drm_atomic_helper_crtc_destroy_state(state);
-	kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
-
-/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
- * @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
- *
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
- */
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
-				     struct drm_plane_state *state)
-{
-	state->plane = plane;
-	state->rotation = DRM_MODE_ROTATE_0;
-
-	state->alpha = DRM_BLEND_ALPHA_OPAQUE;
-	state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
-
-	plane->state = state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
-
-/**
- * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
- * @plane: drm plane
- *
- * Resets the atomic state for @plane by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_plane_reset(struct drm_plane *plane)
-{
-	if (plane->state)
-		__drm_atomic_helper_plane_destroy_state(plane->state);
-
-	kfree(plane->state);
-	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
-	if (plane->state)
-		__drm_atomic_helper_plane_reset(plane, plane->state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
-
-/**
- * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
- * @plane: plane object
- * @state: atomic plane state
- *
- * Copies atomic state from a plane's current state. This is useful for
- * drivers that subclass the plane state.
- */
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
-					       struct drm_plane_state *state)
-{
-	memcpy(state, plane->state, sizeof(*state));
-
-	if (state->fb)
-		drm_framebuffer_get(state->fb);
-
-	state->fence = NULL;
-	state->commit = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
-
-/**
- * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
- * @plane: drm plane
- *
- * Default plane state duplicate hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
-{
-	struct drm_plane_state *state;
-
-	if (WARN_ON(!plane->state))
-		return NULL;
-
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		__drm_atomic_helper_plane_duplicate_state(plane, state);
-
-	return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
-
-/**
- * __drm_atomic_helper_plane_destroy_state - release plane state
- * @state: plane state object to release
- *
- * Releases all resources stored in the plane state without actually freeing
- * the memory of the plane state. This is useful for drivers that subclass the
- * plane state.
- */
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
-{
-	if (state->fb)
-		drm_framebuffer_put(state->fb);
-
-	if (state->fence)
-		dma_fence_put(state->fence);
-
-	if (state->commit)
-		drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
-
-/**
- * drm_atomic_helper_plane_destroy_state - default state destroy hook
- * @plane: drm plane
- * @state: plane state object to release
- *
- * Default plane state destroy hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
-					   struct drm_plane_state *state)
-{
-	__drm_atomic_helper_plane_destroy_state(state);
-	kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
-
-/**
- * __drm_atomic_helper_connector_reset - reset state on connector
- * @connector: drm connector
- * @conn_state: connector state to assign
- *
- * Initializes the newly allocated @conn_state and assigns it to
- * the &drm_conector->state pointer of @connector, usually required when
- * initializing the drivers or when called from the &drm_connector_funcs.reset
- * hook.
- *
- * This is useful for drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_reset(struct drm_connector *connector,
-				    struct drm_connector_state *conn_state)
-{
-	if (conn_state)
-		conn_state->connector = connector;
-
-	connector->state = conn_state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
-
-/**
- * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
- * @connector: drm connector
- *
- * Resets the atomic state for @connector by freeing the state pointer (which
- * might be NULL, e.g. at driver load time) and allocating a new empty state
- * object.
- */
-void drm_atomic_helper_connector_reset(struct drm_connector *connector)
-{
-	struct drm_connector_state *conn_state =
-		kzalloc(sizeof(*conn_state), GFP_KERNEL);
-
-	if (connector->state)
-		__drm_atomic_helper_connector_destroy_state(connector->state);
-
-	kfree(connector->state);
-	__drm_atomic_helper_connector_reset(connector, conn_state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
-
-/**
- * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
- * @connector: connector object
- * @state: atomic connector state
- *
- * Copies atomic state from a connector's current state. This is useful for
- * drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
-					    struct drm_connector_state *state)
-{
-	memcpy(state, connector->state, sizeof(*state));
-	if (state->crtc)
-		drm_connector_get(connector);
-	state->commit = NULL;
-
-	/* Don't copy over a writeback job, they are used only once */
-	state->writeback_job = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
- * @connector: drm connector
- *
- * Default connector state duplicate hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
-{
-	struct drm_connector_state *state;
-
-	if (WARN_ON(!connector->state))
-		return NULL;
-
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		__drm_atomic_helper_connector_duplicate_state(connector, state);
-
-	return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_duplicate_state - duplicate an atomic state object
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states. This is used for example by suspend/
- * resume support code to save the state prior to suspend such that it can
- * be restored upon resume.
- *
- * Note that this treats atomic state as persistent between save and restore.
- * Drivers must make sure that this is possible and won't result in confusion
- * or erroneous behaviour.
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * A pointer to the copy of the atomic state object on success or an
- * ERR_PTR()-encoded error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
- */
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
-				  struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_atomic_state *state;
-	struct drm_connector *conn;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	int err = 0;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return ERR_PTR(-ENOMEM);
-
-	state->acquire_ctx = ctx;
-
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_crtc_state *crtc_state;
-
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state)) {
-			err = PTR_ERR(crtc_state);
-			goto free;
-		}
-	}
-
-	drm_for_each_plane(plane, dev) {
-		struct drm_plane_state *plane_state;
-
-		plane_state = drm_atomic_get_plane_state(state, plane);
-		if (IS_ERR(plane_state)) {
-			err = PTR_ERR(plane_state);
-			goto free;
-		}
-	}
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		struct drm_connector_state *conn_state;
-
-		conn_state = drm_atomic_get_connector_state(state, conn);
-		if (IS_ERR(conn_state)) {
-			err = PTR_ERR(conn_state);
-			drm_connector_list_iter_end(&conn_iter);
-			goto free;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	/* clear the acquire context so that it isn't accidentally reused */
-	state->acquire_ctx = NULL;
-
-free:
-	if (err < 0) {
-		drm_atomic_state_put(state);
-		state = ERR_PTR(err);
-	}
-
-	return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
-
-/**
- * __drm_atomic_helper_connector_destroy_state - release connector state
- * @state: connector state object to release
- *
- * Releases all resources stored in the connector state without actually
- * freeing the memory of the connector state. This is useful for drivers that
- * subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
-{
-	if (state->crtc)
-		drm_connector_put(state->connector);
-
-	if (state->commit)
-		drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
-
-/**
- * drm_atomic_helper_connector_destroy_state - default state destroy hook
- * @connector: drm connector
- * @state: connector state object to release
- *
- * Default connector state destroy hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
-					  struct drm_connector_state *state)
-{
-	__drm_atomic_helper_connector_destroy_state(state);
-	kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
-
-/**
  * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
  * @crtc: CRTC object
  * @red: red correction table
@@ -3979,18 +3567,3 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
- * __drm_atomic_helper_private_duplicate_state - copy atomic private state
- * @obj: CRTC object
- * @state: new private object state
- *
- * Copies atomic state from a private objects's current state and resets inferred values.
- * This is useful for drivers that subclass the private state.
- */
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
-						     struct drm_private_state *state)
-{
-	memcpy(state, obj->state, sizeof(*state));
-}
-EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
new file mode 100644
index 0000000..60bd7d7
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
+
+#include <linux/slab.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available. This is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
+ */
+
+/**
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+	if (crtc->state)
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+	kfree(crtc->state);
+	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+
+	if (crtc->state)
+		crtc->state->crtc = crtc;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+					      struct drm_crtc_state *state)
+{
+	memcpy(state, crtc->state, sizeof(*state));
+
+	if (state->mode_blob)
+		drm_property_blob_get(state->mode_blob);
+	if (state->degamma_lut)
+		drm_property_blob_get(state->degamma_lut);
+	if (state->ctm)
+		drm_property_blob_get(state->ctm);
+	if (state->gamma_lut)
+		drm_property_blob_get(state->gamma_lut);
+	state->mode_changed = false;
+	state->active_changed = false;
+	state->planes_changed = false;
+	state->connectors_changed = false;
+	state->color_mgmt_changed = false;
+	state->zpos_changed = false;
+	state->commit = NULL;
+	state->event = NULL;
+	state->pageflip_flags = 0;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *state;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_crtc_duplicate_state(crtc, state);
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
+{
+	if (state->commit) {
+		/*
+		 * In the event that a non-blocking commit returns
+		 * -ERESTARTSYS before the commit_tail work is queued, we will
+		 * have an extra reference to the commit object. Release it, if
+		 * the event has not been consumed by the worker.
+		 *
+		 * state->event may be freed, so we can't directly look at
+		 * state->event->base.completion.
+		 */
+		if (state->event && state->commit->abort_completion)
+			drm_crtc_commit_put(state->commit);
+
+		kfree(state->commit->event);
+		state->commit->event = NULL;
+
+		drm_crtc_commit_put(state->commit);
+	}
+
+	drm_property_blob_put(state->mode_blob);
+	drm_property_blob_put(state->degamma_lut);
+	drm_property_blob_put(state->ctm);
+	drm_property_blob_put(state->gamma_lut);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state)
+{
+	__drm_atomic_helper_crtc_destroy_state(state);
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * __drm_atomic_helper_plane_reset - resets plane state to default values
+ * @plane: plane object, must not be NULL
+ * @state: atomic plane state, must not be NULL
+ *
+ * Initializes the plane state to default values. This is useful for drivers
+ * that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+				     struct drm_plane_state *state)
+{
+	state->plane = plane;
+	state->rotation = DRM_MODE_ROTATE_0;
+
+	state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+	state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+
+	plane->state = state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+	if (plane->state)
+		__drm_atomic_helper_plane_destroy_state(plane->state);
+
+	kfree(plane->state);
+	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+	if (plane->state)
+		__drm_atomic_helper_plane_reset(plane, plane->state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+					       struct drm_plane_state *state)
+{
+	memcpy(state, plane->state, sizeof(*state));
+
+	if (state->fb)
+		drm_framebuffer_get(state->fb);
+
+	state->fence = NULL;
+	state->commit = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_plane_duplicate_state(plane, state);
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
+{
+	if (state->fb)
+		drm_framebuffer_put(state->fb);
+
+	if (state->fence)
+		dma_fence_put(state->fence);
+
+	if (state->commit)
+		drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					   struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * __drm_atomic_helper_connector_reset - reset state on connector
+ * @connector: drm connector
+ * @conn_state: connector state to assign
+ *
+ * Initializes the newly allocated @conn_state and assigns it to
+ * the &drm_connector->state pointer of @connector, usually required when
+ * initializing the driver or when called from the &drm_connector_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_reset(struct drm_connector *connector,
+				    struct drm_connector_state *conn_state)
+{
+	if (conn_state)
+		conn_state->connector = connector;
+
+	connector->state = conn_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+	struct drm_connector_state *conn_state =
+		kzalloc(sizeof(*conn_state), GFP_KERNEL);
+
+	if (connector->state)
+		__drm_atomic_helper_connector_destroy_state(connector->state);
+
+	kfree(connector->state);
+	__drm_atomic_helper_connector_reset(connector, conn_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+					    struct drm_connector_state *state)
+{
+	memcpy(state, connector->state, sizeof(*state));
+	if (state->crtc)
+		drm_connector_get(connector);
+	state->commit = NULL;
+
+	/* Don't copy over a writeback job, they are used only once */
+	state->writeback_job = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+	struct drm_connector_state *state;
+
+	if (WARN_ON(!connector->state))
+		return NULL;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_connector_duplicate_state(connector, state);
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
+{
+	if (state->crtc)
+		drm_connector_put(state->connector);
+
+	if (state->commit)
+		drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					  struct drm_connector_state *state)
+{
+	__drm_atomic_helper_connector_destroy_state(state);
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state. This is useful
+ * for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+						     struct drm_private_state *state)
+{
+	memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
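
The __drm_atomic_helper_*() variants above exist so drivers can subclass the
state structures. As a reference, here is a minimal sketch of a driver-private
CRTC state built on these helpers; the foo_* names are hypothetical and not
part of this patch:

	struct foo_crtc_state {
		struct drm_crtc_state base;
		u32 foo_flags;		/* driver-private member */
	};

	#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

	static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state;

		if (WARN_ON(!crtc->state))
			return NULL;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		/* copies the base state and takes the property blob references */
		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
		state->foo_flags = to_foo_crtc_state(crtc->state)->foo_flags;

		return &state->base;
	}

	static void foo_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
	{
		/* drops the references held by the base state */
		__drm_atomic_helper_crtc_destroy_state(state);
		kfree(to_foo_crtc_state(state));
	}

These would be wired into &drm_crtc_funcs.atomic_duplicate_state and
.atomic_destroy_state; the plane, connector and private-object variants follow
the same shape.
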
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index d5b7f31..c408898 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
 		drm_property_blob_put(mode);
 		return ret;
+	} else if (property == config->prop_vrr_enabled) {
+		state->vrr_enabled = val;
 	} else if (property == config->degamma_lut_property) {
 		ret = drm_atomic_replace_property_blob_from_id(dev,
 					&state->degamma_lut,
@@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		*val = state->active;
 	else if (property == config->prop_mode_id)
 		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+	else if (property == config->prop_vrr_enabled)
+		*val = state->vrr_enabled;
 	else if (property == config->degamma_lut_property)
 		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
 	else if (property == config->ctm_property)
@@ -513,6 +517,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_mode_config *config = &dev->mode_config;
+	bool replaced = false;
+	int ret;
 
 	if (property == config->prop_fb_id) {
 		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
@@ -566,6 +572,14 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 		state->color_encoding = val;
 	} else if (property == plane->color_range_property) {
 		state->color_range = val;
+	} else if (property == config->prop_fb_damage_clips) {
+		ret = drm_atomic_replace_property_blob_from_id(dev,
+					&state->fb_damage_clips,
+					val,
+					-1,
+					sizeof(struct drm_rect),
+					&replaced);
+		return ret;
 	} else if (plane->funcs->atomic_set_property) {
 		return plane->funcs->atomic_set_property(plane, state,
 				property, val);
@@ -621,6 +635,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->color_encoding;
 	} else if (property == plane->color_range_property) {
 		*val = state->color_range;
+	} else if (property == config->prop_fb_damage_clips) {
+		*val = (state->fb_damage_clips) ?
+			state->fb_damage_clips->base.id : 0;
 	} else if (plane->funcs->atomic_get_property) {
 		return plane->funcs->atomic_get_property(plane, state, property, val);
 	} else {
@@ -740,6 +757,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
 
 		return set_out_fence_for_connector(state->state, connector,
 						   fence_ptr);
+	} else if (property == connector->max_bpc_property) {
+		state->max_requested_bpc = val;
 	} else if (connector->funcs->atomic_set_property) {
 		return connector->funcs->atomic_set_property(connector,
 				state, property, val);
@@ -804,6 +823,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
 		*val = 0;
 	} else if (property == config->writeback_out_fence_ptr_property) {
 		*val = 0;
+	} else if (property == connector->max_bpc_property) {
+		*val = state->max_requested_bpc;
 	} else if (connector->funcs->atomic_get_property) {
 		return connector->funcs->atomic_get_property(connector,
 				state, property, val);
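
From user-space, the new vrr_enabled and max bpc properties are programmed
like any other atomic property. A hedged sketch using libdrm (fd, crtc_id,
connector_id and the property ids are assumed to have been looked up
beforehand, e.g. via drmModeObjectGetProperties()):

	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* vrr_enabled lives on the CRTC, max bpc on the connector */
	drmModeAtomicAddProperty(req, crtc_id, vrr_enabled_prop_id, 1);
	drmModeAtomicAddProperty(req, connector_id, max_bpc_prop_id, 8);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
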
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 7412aca..d7d10ca 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
 #include <drm/drmP.h>
 #include "drm_legacy.h"
 
+#include <linux/nospec.h>
+
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 						  struct drm_local_map *map)
 {
@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
 				  idx, dma->buf_count - 1);
 			return -EINVAL;
 		}
+		idx = array_index_nospec(idx, dma->buf_count);
 		buf = dma->buflist[idx];
 		if (buf->file_priv != file_priv) {
 			DRM_ERROR("Process %d freeing buffer not owned\n",
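
The array_index_nospec() call added above is the standard Spectre-v1 hardening
pattern: after the bounds check, the user-controlled index is clamped so it
cannot be used out of range on a mis-speculated path. The general shape, with
idx, count and array as placeholders:

	#include <linux/nospec.h>

	if (idx >= count)		/* architectural bounds check */
		return -EINVAL;
	/* keep idx inside [0, count) even under speculation */
	idx = array_index_nospec(idx, count);
	val = array[idx];		/* now safe to dereference */
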
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fc03d26..9b2bd28 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -81,8 +81,7 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
 {
 	int ret;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
-	    !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+	if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
 		return -EOPNOTSUPP;
 
 	if (funcs && !try_module_get(funcs->owner))
@@ -229,8 +228,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
 {
 	struct drm_device *dev = buffer->client->dev;
 
-	if (buffer->vaddr && dev->driver->gem_prime_vunmap)
-		dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+	drm_gem_vunmap(buffer->gem, buffer->vaddr);
 
 	if (buffer->gem)
 		drm_gem_object_put_unlocked(buffer->gem);
@@ -283,9 +281,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 	 * fd_install step out of the driver backend hooks, to make that
 	 * final step optional for internal users.
 	 */
-	vaddr = dev->driver->gem_prime_vmap(obj);
-	if (!vaddr) {
-		ret = -ENOMEM;
+	vaddr = drm_gem_vmap(obj);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
 		goto err_delete;
 	}
 
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 581cc37..07dcf47 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	if (crtc_lut->gamma_size != crtc->gamma_size)
 		return -EINVAL;
 
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret)
-		goto out;
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
 	size = crtc_lut->gamma_size * (sizeof(uint16_t));
 	r_base = crtc->gamma_store;
@@ -284,13 +280,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				     crtc->gamma_size, &ctx);
 
 out:
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 	return ret;
 
 }
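
DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() are the new helpers
from this series that hide the acquire-context setup, lock-all call, -EDEADLK
backoff and retry visible in the removed lines. A minimal sketch of the
resulting shape (first_step()/second_step() are placeholders; error paths
between the two macros must reach END via a label rather than returning
directly):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = first_step(dev);
	if (ret)
		goto out;

	ret = second_step(dev);

out:
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	return ret;
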
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4943cef..da8ae80 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -260,9 +260,7 @@ int drm_connector_init(struct drm_device *dev,
 
 	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
 	    connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-		drm_object_attach_property(&connector->base,
-					      config->edid_property,
-					      0);
+		drm_connector_attach_edid_property(connector);
 
 	drm_object_attach_property(&connector->base,
 				      config->dpms_property, 0);
@@ -295,6 +293,24 @@ int drm_connector_init(struct drm_device *dev,
 EXPORT_SYMBOL(drm_connector_init);
 
 /**
+ * drm_connector_attach_edid_property - attach edid property.
+ * @connector: the connector
+ *
+ * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get an
+ * edid property attached by default.  This function can be used to
+ * explicitly enable the edid property in these cases.
+ */
+void drm_connector_attach_edid_property(struct drm_connector *connector)
+{
+	struct drm_mode_config *config = &connector->dev->mode_config;
+
+	drm_object_attach_property(&connector->base,
+				   config->edid_property,
+				   0);
+}
+EXPORT_SYMBOL(drm_connector_attach_edid_property);
+
+/**
  * drm_connector_attach_encoder - attach a connector to an encoder
  * @connector: connector to attach
  * @encoder: encoder to attach @connector to
@@ -916,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  *	  is no longer protected and userspace should take appropriate action
  *	  (whatever that might be).
  *
+ * max bpc:
+ *	This range property is used by userspace to limit the bit depth. When
+ *	used, the driver limits the bpc in accordance with the valid range
+ *	supported by the hardware and sink. Drivers should use the function
+ *	drm_connector_attach_max_bpc_property() to create and attach the
+ *	property to the connector during initialization.
+ *
  * Connectors also have one standardized atomic property:
  *
  * CRTC_ID:
@@ -1256,6 +1279,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
 /**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate was active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz, no stuttering would be observable
+ * for the example scenario. The minimum supported variable refresh rate of
+ * 35Hz is below the page flip frequency and the vertical front porch can
+ * be extended until the page flip occurs. The vblank interval will be
+ * directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ *	Optional &drm_connector boolean property that drivers should attach
+ *	with drm_connector_attach_vrr_capable_property() on connectors that
+ *	could support variable refresh rates. Drivers should update the
+ *	property value by calling drm_connector_set_vrr_capable_property().
+ *
+ *	Absence of the property should indicate absence of support.
+ *
+ * "vrr_enabled":
+ *	Default &drm_crtc boolean property that notifies the driver that the
+ *	content on the CRTC is suitable for variable refresh rate presentation.
+ *	The driver will take this property as a hint to enable variable
+ *	refresh rate support if the receiver supports it, i.e. if the
+ *	"vrr_capable" property is true on the &drm_connector object. The
+ *	vertical front porch duration will be extended until page-flip or
+ *	timeout when enabled.
+ *
+ *	The minimum vertical front porch duration is defined as the vertical
+ *	front porch duration for the current mode.
+ *
+ *	The maximum vertical front porch duration is greater than or equal to
+ *	the minimum vertical front porch duration. The duration is derived
+ *	from the minimum supported variable refresh rate for the connector.
+ *
+ *	The driver may place further restrictions within these minimum
+ *	and maximum bounds.
+ *
+ *	The semantics for the vertical blank timestamp differ when
+ *	variable refresh rate is active. The vertical blank timestamp
+ *	is defined to be an estimate using the current mode's fixed
+ *	refresh rate timings. The semantics for the page-flip event
+ *	timestamp remain the same.
+ */
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+	struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	if (!connector->vrr_capable_property) {
+		prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+			"vrr_capable");
+		if (!prop)
+			return -ENOMEM;
+
+		connector->vrr_capable_property = prop;
+		drm_object_attach_property(&connector->base, prop, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
  * drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
  * @connector: connector to attach scaling mode property on.
  * @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
@@ -1584,6 +1706,58 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_connector_set_link_status_property);
 
 /**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+					  int min, int max)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	prop = connector->max_bpc_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+		if (!prop)
+			return -ENOMEM;
+
+		connector->max_bpc_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, max);
+	connector->state->max_requested_bpc = max;
+	connector->state->max_bpc = max;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
+
+/**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+		struct drm_connector *connector, bool capable)
+{
+	drm_object_property_set_value(&connector->base,
+				      connector->vrr_capable_property,
+				      capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
+
+/**
  * drm_connector_init_panel_orientation_property -
 *	initialize the connector's panel_orientation property
  * @connector: connector for which to init the panel-orientation property.
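
Taken together, a driver would typically wire the new connector properties up
as follows; this is a sketch, with sink_supports_vrr standing in for whatever
capability detection the driver performs:

	/* at connector initialization time */
	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;

	ret = drm_connector_attach_vrr_capable_property(connector);
	if (ret)
		return ret;

	/* later, once the sink's capabilities are known (EDID/DPCD parsing) */
	drm_connector_set_vrr_capable_property(connector, sink_supports_vrr);
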
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 268a182..1593dd6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -340,6 +340,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
 		drm_object_attach_property(&crtc->base,
 					   config->prop_out_fence_ptr, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->prop_vrr_enabled, 0);
 	}
 
 	return 0;
@@ -570,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_mode_crtc *crtc_req = data;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_connector **connector_set, *connector;
-	struct drm_framebuffer *fb;
-	struct drm_display_mode *mode;
+	struct drm_connector **connector_set = NULL, *connector;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_display_mode *mode = NULL;
 	struct drm_mode_set set;
 	uint32_t __user *set_connectors_ptr;
 	struct drm_modeset_acquire_ctx ctx;
@@ -599,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	plane = crtc->primary;
 
 	mutex_lock(&crtc->dev->mode_config.mutex);
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
-	connector_set = NULL;
-	fb = NULL;
-	mode = NULL;
-
-	ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
-	if (ret)
-		goto out;
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
+				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 
 	if (crtc_req->mode_valid) {
 		/* If we have a mode we need a framebuffer. */
@@ -766,13 +761,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	}
 	kfree(connector_set);
 	drm_mode_destroy(dev, mode);
-	if (ret == -EDEADLK) {
-		ret = drm_modeset_backoff(&ctx);
-		if (!ret)
-			goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+
+	/* In case we need to retry... */
+	connector_set = NULL;
+	fb = NULL;
+	mode = NULL;
+
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 	mutex_unlock(&crtc->dev->mode_config.mutex);
 
 	return ret;
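
One subtlety in the conversion above: on -EDEADLK, DRM_MODESET_LOCK_ALL_END()
jumps back into DRM_MODESET_LOCK_ALL_BEGIN() to retry, so locals freed on the
way out must be reset to NULL first, exactly what the added "In case we need
to retry..." lines do, otherwise the retry pass would free or reuse stale
pointers. The shape of the pattern, with buf as a placeholder:

	struct drm_modeset_acquire_ctx ctx;
	void *buf = NULL;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	buf = kmalloc(SZ_4K, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}
	/* ... locked work using buf ... */

out:
	kfree(buf);
	buf = NULL;	/* END may jump back to the retry label on -EDEADLK */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	return ret;
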
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ce75e950..a3c8185 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -984,118 +984,3 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 	drm_modeset_unlock_all(dev);
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
-
-/**
- * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @mode: DRM display mode which userspace requested
- * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback useable as the ->mode_set callback
- * required by the CRTC helpers. Besides the atomic plane helper functions for
- * the primary plane the driver must also provide the ->mode_set_nofb callback
- * to set up the CRTC.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode, int x, int y,
-			     struct drm_framebuffer *old_fb)
-{
-	struct drm_crtc_state *crtc_state;
-	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	int ret;
-
-	if (crtc->funcs->atomic_duplicate_state)
-		crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
-	else {
-		if (!crtc->state)
-			drm_atomic_helper_crtc_reset(crtc);
-
-		crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
-	}
-
-	if (!crtc_state)
-		return -ENOMEM;
-
-	crtc_state->planes_changed = true;
-	crtc_state->mode_changed = true;
-	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
-	if (ret)
-		goto out;
-	drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
-
-	if (crtc_funcs->atomic_check) {
-		ret = crtc_funcs->atomic_check(crtc, crtc_state);
-		if (ret)
-			goto out;
-	}
-
-	swap(crtc->state, crtc_state);
-
-	crtc_funcs->mode_set_nofb(crtc);
-
-	ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
-
-out:
-	if (crtc_state) {
-		if (crtc->funcs->atomic_destroy_state)
-			crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-		else
-			drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set);
-
-/**
- * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback useable as the ->mode_set_base used
- * required by the CRTC helpers. The driver must provide the atomic plane helper
- * functions for the primary plane.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-				  struct drm_framebuffer *old_fb)
-{
-	struct drm_plane_state *plane_state;
-	struct drm_plane *plane = crtc->primary;
-
-	if (plane->funcs->atomic_duplicate_state)
-		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else {
-		if (!plane->state)
-			drm_atomic_helper_plane_reset(plane);
-
-		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	}
-	if (!plane_state)
-		return -ENOMEM;
-	plane_state->plane = plane;
-
-	plane_state->crtc = crtc;
-	drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
-	plane_state->crtc_x = 0;
-	plane_state->crtc_y = 0;
-	plane_state->crtc_h = crtc->mode.vdisplay;
-	plane_state->crtc_w = crtc->mode.hdisplay;
-	plane_state->src_x = x << 16;
-	plane_state->src_y = y << 16;
-	plane_state->src_h = crtc->mode.vdisplay << 16;
-	plane_state->src_w = crtc->mode.hdisplay << 16;
-
-	return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
new file mode 100644
index 0000000..d2a1c73
--- /dev/null
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ * Rob Clark <robdclark@gmail.com>
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * FB_DAMAGE_CLIPS is an optional plane property which provides a means to
+ * specify a list of damage rectangles on a plane in framebuffer coordinates of
+ * the framebuffer attached to the plane. In this context, damage is the area
+ * of the plane framebuffer that has changed since the last plane update (also
+ * called page-flip), irrespective of whether the currently attached
+ * framebuffer is the same as the framebuffer attached during the last plane
+ * update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel which can be helpful for some
+ * drivers to optimize internally, especially for virtual devices where each
+ * framebuffer change needs to be transmitted over network, USB, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is only a hint, it is an optional property. User-space
+ * can ignore the damage clips property, in which case the driver will do a
+ * full plane update. If damage clips are provided, it is guaranteed that the
+ * area inside the damage clips will be updated on the plane. For efficiency
+ * the driver can do a full update, or update more than specified in the
+ * damage clips. Since the driver is free to read more, user-space must always
+ * render the entire visible framebuffer, otherwise there can be corruption.
+ * Also, damage clips which do not encompass the actual damage to the
+ * framebuffer (since the last plane update) can result in incorrect rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property whose blob data is simply an array of
+ * &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, damage clips
+ * are not in 16.16 fixed point. Like the plane src in the framebuffer, damage
+ * clips cannot be negative. In a damage clip, x1/y1 are inclusive and x2/y2
+ * are exclusive. While the kernel does not error out on overlapping damage
+ * clips, they are strongly discouraged.
+ *
+ * Drivers interested in the damage interface for planes should enable the
+ * FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips().
+ * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and
+ * drm_atomic_helper_damage_iter_next() helper iterator functions to get damage
+ * rectangles clipped to &drm_plane_state.src.
+ */
+
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+				      struct drm_mode_rect *dest,
+				      uint32_t num_clips, uint32_t src_inc)
+{
+	while (num_clips > 0) {
+		dest->x1 = src->x1;
+		dest->y1 = src->y1;
+		dest->x2 = src->x2;
+		dest->y2 = src->y2;
+		src += src_inc;
+		dest++;
+		num_clips--;
+	}
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets a driver enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+				   0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from plane state is discarded
+ * for a full modeset. If there are more reasons a driver would want to do a full
+ * plane update rather than processing individual damage regions, then those
+ * cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in plane state means that
+ * a full plane update should happen. It also ensures the helper iterator will
+ * return &drm_plane_state.src as damage.
+ */
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+					  struct drm_plane_state *plane_state)
+{
+	struct drm_crtc_state *crtc_state;
+
+	if (plane_state->crtc) {
+		crtc_state = drm_atomic_get_new_crtc_state(state,
+							   plane_state->crtc);
+
+		if (WARN_ON(!crtc_state))
+			return;
+
+		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+			drm_property_blob_put(plane_state->fb_damage_clips);
+			plane_state->fb_damage_clips = NULL;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
+
+/**
+ * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
+ * @fb: DRM framebuffer.
+ * @file_priv: DRM file for the ioctl call.
+ * @flags: Dirty fb annotate flags.
+ * @color: Color for annotate fill.
+ * @clips: Dirty region.
+ * @num_clips: Number of clips in @clips.
+ *
+ * A helper to implement &drm_framebuffer_funcs.dirty using the damage
+ * interface during plane update. If num_clips is 0 then this helper will do a
+ * full plane update. This is the same behaviour expected by the DIRTYFB IOCTL.
+ *
+ * Note that this helper is a blocking implementation. This is what current
+ * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
+ * to rate-limit userspace and make sure its rendering doesn't get ahead of
+ * uploading new data too much.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+			      struct drm_file *file_priv, unsigned int flags,
+			      unsigned int color, struct drm_clip_rect *clips,
+			      unsigned int num_clips)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_property_blob *damage = NULL;
+	struct drm_mode_rect *rects = NULL;
+	struct drm_atomic_state *state;
+	struct drm_plane *plane;
+	int ret = 0;
+
+	/*
+	 * When called from an ioctl, we are interruptible, but not when called
+	 * internally (i.e. from the defio worker)
+	 */
+	drm_modeset_acquire_init(&ctx,
+		file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
+
+	state = drm_atomic_state_alloc(fb->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	state->acquire_ctx = &ctx;
+
+	if (clips) {
+		uint32_t inc = 1;
+
+		if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+			inc = 2;
+			num_clips /= 2;
+		}
+
+		rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
+		if (!rects) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		convert_clip_rect_to_rect(clips, rects, num_clips, inc);
+		damage = drm_property_create_blob(fb->dev,
+						  num_clips * sizeof(*rects),
+						  rects);
+		if (IS_ERR(damage)) {
+			ret = PTR_ERR(damage);
+			damage = NULL;
+			goto out;
+		}
+	}
+
+retry:
+	drm_for_each_plane(plane, fb->dev) {
+		struct drm_plane_state *plane_state;
+
+		if (plane->state->fb != fb)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto out;
+		}
+
+		drm_property_replace_blob(&plane_state->fb_damage_clips,
+					  damage);
+	}
+
+	ret = drm_atomic_commit(state);
+
+out:
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		ret = drm_modeset_backoff(&ctx);
+		if (!ret)
+			goto retry;
+	}
+
+	drm_property_blob_put(damage);
+	kfree(rects);
+	drm_atomic_state_put(state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+
+}
+EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
+
+/**
+ * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
+ * @iter: The iterator to initialize.
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ *
+ * Initialize an iterator which clips the plane damage
+ * &drm_plane_state.fb_damage_clips to the plane &drm_plane_state.src. The
+ * iterator returns the full plane src when damage is not present, either
+ * because user-space didn't send it or because the driver discarded it (i.e.
+ * it wants to do a full plane update). Currently this iterator also returns
+ * the full plane src when the plane src changed, but that may change in the
+ * future to return damage instead.
+ *
+ * When the plane is not visible, or the plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so drivers calling this helper should have
+ * called drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+				   const struct drm_plane_state *old_state,
+				   const struct drm_plane_state *state)
+{
+	memset(iter, 0, sizeof(*iter));
+
+	if (!state || !state->crtc || !state->fb || !state->visible)
+		return;
+
+	iter->clips = drm_helper_get_plane_damage_clips(state);
+	iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+	/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+	iter->plane_src.x1 = state->src.x1 >> 16;
+	iter->plane_src.y1 = state->src.y1 >> 16;
+	iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+	iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+	if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+		iter->clips = NULL;
+		iter->num_clips = 0;
+		iter->full_update = true;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Returned rectangle in fb coordinates, clipped to the plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are whole
+ * numbers, this iterator rounds off clips that intersect with the plane src:
+ * round down for x1/y1 and round up for x2/y2 for the intersected coordinates.
+ * The full plane src is rounded off similarly when it is returned as damage.
+ * This iterator will skip damage clips that lie outside of the plane src.
+ *
+ * Return: True if the output is valid, false if reached the end.
+ *
+ * If the first call to the iterator returns false, there is no need to update
+ * the plane.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+				   struct drm_rect *rect)
+{
+	bool ret = false;
+
+	if (iter->full_update) {
+		*rect = iter->plane_src;
+		iter->full_update = false;
+		return true;
+	}
+
+	while (iter->curr_clip < iter->num_clips) {
+		*rect = iter->clips[iter->curr_clip];
+		iter->curr_clip++;
+
+		if (drm_rect_intersect(rect, &iter->plane_src)) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
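
Putting the damage interface together, a hypothetical driver for a virtual
device would enable the property at plane-init time with
drm_plane_enable_fb_damage_clips(plane), point &drm_framebuffer_funcs.dirty at
drm_atomic_helper_dirtyfb for legacy front-buffer clients, and walk the
clipped rectangles in its plane update; foo_hw_flush_rect() is a placeholder
for the device-specific upload:

	static void foo_plane_atomic_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
	{
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect clip;

		/* assumes drm_atomic_helper_check_plane_state() ran in check */
		drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
		while (drm_atomic_helper_damage_iter_next(&iter, &clip)) {
			/* clip is in fb coordinates, clipped to the plane src */
			foo_hw_flush_rect(plane->state->fb, &clip);
		}
	}

If the iterator yields nothing, the plane needs no update at all.
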
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 373bd4c..f8468ea 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,8 @@
 #include <drm/drm_debugfs.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_gem.h>
 #include <drm/drmP.h>
 
 #include "drm_internal.h"
@@ -43,6 +45,93 @@
  * Initialization, etc.
  **************************************************/
 
+static int drm_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+	struct drm_master *master;
+
+	mutex_lock(&dev->master_mutex);
+	master = dev->master;
+	seq_printf(m, "%s", dev->driver->name);
+	if (dev->dev)
+		seq_printf(m, " dev=%s", dev_name(dev->dev));
+	if (master && master->unique)
+		seq_printf(m, " master=%s", master->unique);
+	if (dev->unique)
+		seq_printf(m, " unique=%s", dev->unique);
+	seq_printf(m, "\n");
+	mutex_unlock(&dev->master_mutex);
+
+	return 0;
+}
+
+static int drm_clients_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_file *priv;
+	kuid_t uid;
+
+	seq_printf(m,
+		   "%20s %5s %3s master a %5s %10s\n",
+		   "command",
+		   "pid",
+		   "dev",
+		   "uid",
+		   "magic");
+
+	/* dev->filelist is sorted youngest first, but we want to present
+	 * oldest first (i.e. kernel, servers, clients), so walk backwards.
+	 */
+	mutex_lock(&dev->filelist_mutex);
+	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
+		struct task_struct *task;
+
+		rcu_read_lock(); /* locks pid_task()->comm */
+		task = pid_task(priv->pid, PIDTYPE_PID);
+		uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
+		seq_printf(m, "%20s %5d %3d   %c    %c %5d %10u\n",
+			   task ? task->comm : "<unknown>",
+			   pid_vnr(priv->pid),
+			   priv->minor->index,
+			   drm_is_current_master(priv) ? 'y' : 'n',
+			   priv->authenticated ? 'y' : 'n',
+			   from_kuid_munged(seq_user_ns(m), uid),
+			   priv->magic);
+		rcu_read_unlock();
+	}
+	mutex_unlock(&dev->filelist_mutex);
+	return 0;
+}
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct seq_file *m = data;
+
+	seq_printf(m, "%6d %8zd %7d %8d\n",
+		   obj->name, obj->size,
+		   obj->handle_count,
+		   kref_read(&obj->refcount));
+	return 0;
+}
+
+static int drm_gem_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "  name     size handles refcount\n");
+
+	mutex_lock(&dev->object_name_lock);
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+	mutex_unlock(&dev->object_name_lock);
+
+	return 0;
+}
+
 static const struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"clients", drm_clients_info, 0},
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index 8a718f8..b15cee8 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -424,8 +424,6 @@ void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
 	aux->cec.parent = parent;
 	INIT_DELAYED_WORK(&aux->cec.unregister_work,
 			  drm_dp_cec_unregister_work);
-
-	drm_dp_cec_set_edid(aux, NULL);
 }
 EXPORT_SYMBOL(drm_dp_cec_register_connector);
 
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 37c01b6..2d6c491 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1352,3 +1352,95 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_read_desc);
+
+/*
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+				   bool is_edp)
+{
+	u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+	if (is_edp) {
+		/* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+		if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+			return 4;
+		if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+			return 2;
+		if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+			return 1;
+	} else {
+		/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP_2 */
+		u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+		if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+			return 24;
+		if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+			return 20;
+		if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+			return 16;
+		if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+			return 12;
+		if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+			return 10;
+		if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+			return 8;
+		if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+			return 6;
+		if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+			return 4;
+		if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+			return 2;
+		if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+	switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+	case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+		return 9;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+		return 10;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+		return 11;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+		return 12;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+		return 13;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+		return 14;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+		return 15;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+		return 16;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+		return 8;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+					 u8 dsc_bpc[3])
+{
+	int num_bpc = 0;
+	u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+	if (color_depth & DP_DSC_12_BPC)
+		dsc_bpc[num_bpc++] = 12;
+	if (color_depth & DP_DSC_10_BPC)
+		dsc_bpc[num_bpc++] = 10;
+	if (color_depth & DP_DSC_8_BPC)
+		dsc_bpc[num_bpc++] = 8;
+
+	return num_bpc;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
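
A consumer of these DSC helpers would first cache the sink's DSC capability
registers over AUX and then query them. A sketch, with error handling trimmed
and aux pointing at the sink's &drm_dp_aux:

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
	u8 dsc_bpc[3];
	u8 max_slices, line_buf_depth;
	int num_bpc;

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) < 0)
		return;

	max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false /* DP */);
	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
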
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0e0df39..5294145 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2572,9 +2572,16 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 EXPORT_SYMBOL(drm_dp_mst_get_edid);
 
 /**
- * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
  * @mgr: manager to use
  * @pbn: payload bandwidth to convert into slots.
+ *
+ * Calculate the number of VCPI slots that will be required for the given PBN
+ * value. This function is deprecated, and should not be used in atomic
+ * drivers.
+ *
+ * RETURNS:
+ * The total slots required for this port, or a negative error code.
  */
 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
 			   int pbn)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 36e8e9c..12e5e2b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -476,8 +476,6 @@ static void drm_fs_inode_free(struct inode *inode)
  * The initial ref-count of the object is 1. Use drm_dev_get() and
  * drm_dev_put() to take and drop further ref-counts.
  *
- * Note that for purely virtual devices @parent can be NULL.
- *
  * Drivers that do not want to allocate their own device struct
  * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
  * that do embed &struct drm_device it must be placed first in the overall
@@ -502,6 +500,8 @@ int drm_dev_init(struct drm_device *dev,
 		return -ENODEV;
 	}
 
+	BUG_ON(!parent);
+
 	kref_init(&dev->ref);
 	dev->dev = parent;
 	dev->driver = driver;
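
With drm_dev_init() now insisting on a parent device, purely virtual drivers
have to supply one; registering a throwaway platform device is the usual
approach (vgem does this). A sketch with hypothetical foo names, where
struct foo_device embeds a &struct drm_device as its drm member:

	struct platform_device *pdev;
	int ret;

	pdev = platform_device_register_simple("foo", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	ret = drm_dev_init(&foo->drm, &foo_driver, &pdev->dev);
	if (ret) {
		platform_device_unregister(pdev);
		return ret;
	}
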
@@ -556,9 +556,7 @@ int drm_dev_init(struct drm_device *dev,
 		}
 	}
 
-	/* Use the parent device name as DRM device unique identifier, but fall
-	 * back to the driver name for virtual devices like vgem. */
-	ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+	ret = drm_dev_set_unique(dev, dev_name(parent));
 	if (ret)
 		goto err_setunique;
 
@@ -706,19 +704,6 @@ void drm_dev_put(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_dev_put);
 
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This is a compatibility alias for drm_dev_put() and should not be used by new
- * code.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
-	drm_dev_put(dev);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
 static int create_compat_control_link(struct drm_device *dev)
 {
 	struct drm_minor *minor;
@@ -975,14 +960,12 @@ static void drm_core_exit(void)
 	drm_sysfs_destroy();
 	idr_destroy(&drm_minors_idr);
 	drm_connector_ida_destroy();
-	drm_global_release();
 }
 
 static int __init drm_core_init(void)
 {
 	int ret;
 
-	drm_global_init();
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
 
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 0000000..bc2b23a
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with the VESA
+ * Display Stream Compression standard required for DSC on DisplayPort/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+	memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+	pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+	pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC display sink device. The sink expects the PPS payload
+ * in big endian format for any field that spans more than one byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+				const struct drm_dsc_config *dsc_cfg)
+{
+	int i;
+
+	/* Protect against someone accidentally changing struct size */
+	BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+		     DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+	memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+	/* PPS 0 */
+	pps_sdp->pps_payload.dsc_version =
+		dsc_cfg->dsc_version_minor |
+		dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+	/* PPS 1, 2 is 0 */
+
+	/* PPS 3 */
+	pps_sdp->pps_payload.pps_3 =
+		dsc_cfg->line_buf_depth |
+		dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+	/* PPS 4 */
+	pps_sdp->pps_payload.pps_4 =
+		((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT) |
+		dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+		dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+		dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+		dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+	/* PPS 5 */
+	pps_sdp->pps_payload.bits_per_pixel_low =
+		(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+	/*
+	 * The DSC panel expects the PPS packet to have big endian format
+	 * for any field spanning 2 bytes. Use cpu_to_be16() to convert:
+	 * on a little endian host it swaps the bytes, on a big endian
+	 * host it leaves them unchanged.
+	 */
+
+	/* PPS 6, 7 */
+	pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+	/* PPS 8, 9 */
+	pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+	/* PPS 10, 11 */
+	pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+	/* PPS 12, 13 */
+	pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+	/* PPS 14, 15 */
+	pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+	/* PPS 16 */
+	pps_sdp->pps_payload.initial_xmit_delay_high =
+		((dsc_cfg->initial_xmit_delay &
+		  DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 17 */
+	pps_sdp->pps_payload.initial_xmit_delay_low =
+		(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+	/* PPS 18, 19 */
+	pps_sdp->pps_payload.initial_dec_delay =
+		cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+	/* PPS 20 is 0 */
+
+	/* PPS 21 */
+	pps_sdp->pps_payload.initial_scale_value =
+		dsc_cfg->initial_scale_value;
+
+	/* PPS 22, 23 */
+	pps_sdp->pps_payload.scale_increment_interval =
+		cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+	/* PPS 24 */
+	pps_sdp->pps_payload.scale_decrement_interval_high =
+		((dsc_cfg->scale_decrement_interval &
+		  DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 25 */
+	pps_sdp->pps_payload.scale_decrement_interval_low =
+		(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+	/* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+	/* PPS 27 */
+	pps_sdp->pps_payload.first_line_bpg_offset =
+		dsc_cfg->first_line_bpg_offset;
+
+	/* PPS 28, 29 */
+	pps_sdp->pps_payload.nfl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+	/* PPS 30, 31 */
+	pps_sdp->pps_payload.slice_bpg_offset =
+		cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+	/* PPS 32, 33 */
+	pps_sdp->pps_payload.initial_offset =
+		cpu_to_be16(dsc_cfg->initial_offset);
+
+	/* PPS 34, 35 */
+	pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+	/* PPS 36 */
+	pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+	/* PPS 37 */
+	pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+	/* PPS 38, 39 */
+	pps_sdp->pps_payload.rc_model_size =
+		cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+	/* PPS 40 */
+	pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+	/* PPS 41 */
+	pps_sdp->pps_payload.rc_quant_incr_limit0 =
+		dsc_cfg->rc_quant_incr_limit0;
+
+	/* PPS 42 */
+	pps_sdp->pps_payload.rc_quant_incr_limit1 =
+		dsc_cfg->rc_quant_incr_limit1;
+
+	/* PPS 43 */
+	pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+		DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+	/* PPS 44 - 57 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+		pps_sdp->pps_payload.rc_buf_thresh[i] =
+			dsc_cfg->rc_buf_thresh[i];
+
+	/* PPS 58 - 87 */
+	/*
+	 * For DSC sink programming the RC Range parameter fields
+	 * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+	 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			((dsc_cfg->rc_range_params[i].range_min_qp <<
+			  DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_max_qp <<
+			  DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_bpg_offset));
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+	}
+
+	/* PPS 88 */
+	pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+		dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+	/* PPS 89 */
+	pps_sdp->pps_payload.second_line_bpg_offset =
+		dsc_cfg->second_line_bpg_offset;
+
+	/* PPS 90, 91 */
+	pps_sdp->pps_payload.nsl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+	/* PPS 92, 93 */
+	pps_sdp->pps_payload.second_line_offset_adj =
+		cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+	/* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
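In use, a driver initializes the SDP header, fills a struct drm_dsc_config with its computed parameters, and packs the payload before handing it to hardware. A hedged sketch; the geometry values are placeholders and most rate-control fields are elided:

	struct drm_dsc_pps_infoframe pps_sdp;
	struct drm_dsc_config dsc_cfg = {
		.dsc_version_major = 1,
		.dsc_version_minor = 1,
		.bits_per_component = 8,
		.pic_width = 3840,	/* placeholder mode geometry */
		.pic_height = 2160,
		.slice_width = 1920,
		.slice_height = 2160,
		/* ... remaining RC parameters computed by the driver ... */
	};

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, &dsc_cfg);
	/* pps_sdp is now ready to be sent to the sink as the PPS SDP. */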
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fb0dfc6..5b51661 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -72,7 +72,9 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
 /**
- * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
+ * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer. For pixel
+ * formats where values are grouped in blocks, this returns the beginning of
+ * the containing block.
  * @fb: The framebuffer
  * @state: Which state of drm plane
  * @plane: Which plane
@@ -87,6 +89,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
 	struct drm_gem_cma_object *obj;
 	dma_addr_t paddr;
 	u8 h_div = 1, v_div = 1;
+	u32 block_w = drm_format_info_block_width(fb->format, plane);
+	u32 block_h = drm_format_info_block_height(fb->format, plane);
+	u32 block_size = fb->format->char_per_block[plane];
+	u32 sample_x;
+	u32 sample_y;
+	u32 block_start_y;
+	u32 num_hblocks;
 
 	obj = drm_fb_cma_get_gem_obj(fb, plane);
 	if (!obj)
@@ -99,8 +108,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
 		v_div = fb->format->vsub;
 	}
 
-	paddr += (fb->format->cpp[plane] * (state->src_x >> 16)) / h_div;
-	paddr += (fb->pitches[plane] * (state->src_y >> 16)) / v_div;
+	sample_x = (state->src_x >> 16) / h_div;
+	sample_y = (state->src_y >> 16) / v_div;
+	block_start_y = (sample_y / block_h) * block_h;
+	num_hblocks = sample_x / block_w;
+
+	paddr += fb->pitches[plane] * block_start_y;
+	paddr += block_size * num_hblocks;
 
 	return paddr;
 }
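To make the new arithmetic concrete, here is a worked example under the assumption of a single-plane format with 2x2 pixel blocks of 8 bytes each, matching the Y0L0-style entries added to drm_fourcc.c later in this patch:

	/* sample coordinates after the 16.16 shift and subsampling divide */
	u32 sample_x = 5, sample_y = 3;
	u32 block_w = 2, block_h = 2, block_size = 8;

	u32 block_start_y = (sample_y / block_h) * block_h;	/* = 2 */
	u32 num_hblocks = sample_x / block_w;			/* = 2 */

	/*
	 * paddr advances by 2 * pitch + 2 * 8 bytes: the start of the
	 * block containing the sample, not the sample itself, which is
	 * what block-based scanout hardware expects.
	 */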
@@ -124,10 +138,7 @@ int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
 
 	/* dev->fb_helper will indirectly point to fbdev_cma after this call */
 	fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
-	if (IS_ERR(fbdev_cma))
-		return PTR_ERR(fbdev_cma);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(fbdev_cma);
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
 
@@ -226,21 +237,3 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
 		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
-
-/**
- * drm_fbdev_cma_set_suspend_unlocked - wrapper around
- *                                      drm_fb_helper_set_suspend_unlocked
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- * @state: desired state, zero to resume, non-zero to suspend
- *
- * Calls drm_fb_helper_set_suspend, which is a wrapper around
- * fb_set_suspend implemented by fbdev core.
- */
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
-					bool state)
-{
-	if (fbdev_cma)
-		drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
-						   state);
-}
-EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9d64f87..d3af098 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1635,6 +1635,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	if (var->pixclock != 0 || in_dbg_master())
 		return -EINVAL;
 
+	if ((drm_format_info_block_width(fb->format, 0) > 1) ||
+	    (drm_format_info_block_height(fb->format, 0) > 1))
+		return -EINVAL;
+
 	/*
 	 * Changes struct fb_var_screeninfo are currently not pushed back
 	 * to KMS, hence fail if different settings are requested.
@@ -1952,6 +1956,8 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
 {
 	struct drm_framebuffer *fb = fb_helper->fb;
 
+	WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) ||
+		(drm_format_info_block_height(fb->format, 0) > 1));
 	info->pseudo_palette = fb_helper->pseudo_palette;
 	info->var.xres_virtual = fb->width;
 	info->var.yres_virtual = fb->height;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 8aaa5e8..d90ee03 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -103,8 +103,8 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
  *
  * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
  * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
- * and depending on the quirk_addfb_prefer_host_byte_order flag it returns
- * little endian byte order or host byte order framebuffer formats.
+ * and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag
+ * it returns little endian byte order or host byte order framebuffer formats.
  */
 uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
 				     uint32_t bpp, uint32_t depth)
@@ -224,7 +224,20 @@ const struct drm_format_info *__drm_format_info(u32 format)
 		{ .format = DRM_FORMAT_YVYU,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
 		{ .format = DRM_FORMAT_UYVY,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
 		{ .format = DRM_FORMAT_VYUY,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+		{ .format = DRM_FORMAT_XYUV8888,	.depth = 0,  .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
 		{ .format = DRM_FORMAT_AYUV,		.depth = 0,  .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+		{ .format = DRM_FORMAT_Y0L0,		.depth = 0,  .num_planes = 1,
+		  .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+		  .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+		{ .format = DRM_FORMAT_X0L0,		.depth = 0,  .num_planes = 1,
+		  .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+		  .hsub = 2, .vsub = 2, .is_yuv = true },
+		{ .format = DRM_FORMAT_Y0L2,		.depth = 0,  .num_planes = 1,
+		  .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+		  .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+		{ .format = DRM_FORMAT_X0L2,		.depth = 0,  .num_planes = 1,
+		  .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+		  .hsub = 2, .vsub = 2, .is_yuv = true },
 	};
 
 	unsigned int i;
@@ -400,3 +413,65 @@ int drm_format_plane_height(int height, uint32_t format, int plane)
 	return height / info->vsub;
 }
 EXPORT_SYMBOL(drm_format_plane_height);
+
+/**
+ * drm_format_info_block_width - width in pixels of block.
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The width in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+					 int plane)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	if (!info->block_w[plane])
+		return 1;
+	return info->block_w[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_width);
+
+/**
+ * drm_format_info_block_height - height in pixels of a block
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The height in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+					  int plane)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	if (!info->block_h[plane])
+		return 1;
+	return info->block_h[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_height);
+
+/**
+ * drm_format_info_min_pitch - computes the minimum required pitch in bytes
+ * @info: pixel format info
+ * @plane: plane index
+ * @buffer_width: buffer width in pixels
+ *
+ * Returns:
+ * The minimum pitch in bytes required for a buffer of the given width, taking
+ * the pixel format's block layout into account.
+ */
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+				   int plane, unsigned int buffer_width)
+{
+	if (!info || plane < 0 || plane >= info->num_planes)
+		return 0;
+
+	return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane],
+			    drm_format_info_block_width(info, plane) *
+			    drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_min_pitch);
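For linear formats char_per_block[] carries the same value as cpp[] and the block dimensions default to 1, so drm_format_info_min_pitch() degenerates to width * cpp. For the 2x2-block formats added above the result differs; a small self-check sketch:

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_Y0L0);
	u64 pitch = drm_format_info_min_pitch(info, 0, 640);

	/* 640 pixels * 8 bytes / (2 * 2 pixels per block) = 1280 bytes */
	WARN_ON(pitch != 1280);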
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 3bf729d..fcaea8f 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -195,20 +195,26 @@ static int framebuffer_check(struct drm_device *dev,
 	for (i = 0; i < info->num_planes; i++) {
 		unsigned int width = fb_plane_width(r->width, info, i);
 		unsigned int height = fb_plane_height(r->height, info, i);
-		unsigned int cpp = info->cpp[i];
+		unsigned int block_size = info->char_per_block[i];
+		u64 min_pitch = drm_format_info_min_pitch(info, i, width);
+
+		if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
+			DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+			return -EINVAL;
+		}
 
 		if (!r->handles[i]) {
 			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
 			return -EINVAL;
 		}
 
-		if ((uint64_t) width * cpp > UINT_MAX)
+		if (min_pitch > UINT_MAX)
 			return -ERANGE;
 
 		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
 			return -ERANGE;
 
-		if (r->pitches[i] < width * cpp) {
+		if (block_size && r->pitches[i] < min_pitch) {
 			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
 			return -EINVAL;
 		}
@@ -317,6 +323,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
 
 	return fb;
 }
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_framebuffer_create);
 
 /**
  * drm_mode_addfb2 - add an FB to the graphics configuration
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 512078e..8b55ece 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
-	if (dev->driver->gem_close_object)
+	if (obj->funcs && obj->funcs->close)
+		obj->funcs->close(obj, file_priv);
+	else if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	if (ret)
 		goto err_remove;
 
-	if (dev->driver->gem_open_object) {
+	if (obj->funcs && obj->funcs->open) {
+		ret = obj->funcs->open(obj, file_priv);
+		if (ret)
+			goto err_revoke;
+	} else if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
 		if (ret)
 			goto err_revoke;
@@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
 		container_of(kref, struct drm_gem_object, refcount);
 	struct drm_device *dev = obj->dev;
 
-	if (dev->driver->gem_free_object_unlocked) {
+	if (obj->funcs) {
+		obj->funcs->free(obj);
+	} else if (dev->driver->gem_free_object_unlocked) {
 		dev->driver->gem_free_object_unlocked(obj);
 	} else if (dev->driver->gem_free_object) {
 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
 
 	dev = obj->dev;
 
-	if (dev->driver->gem_free_object_unlocked) {
-		kref_put(&obj->refcount, drm_gem_object_free);
-	} else {
+	if (dev->driver->gem_free_object) {
 		might_lock(&dev->struct_mutex);
 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
 				&dev->struct_mutex))
 			mutex_unlock(&dev->struct_mutex);
+	} else {
+		kref_put(&obj->refcount, drm_gem_object_free);
 	}
 }
 EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	if (obj_size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 
-	if (!dev->driver->gem_vm_ops)
+	if (obj->funcs && obj->funcs->vm_ops)
+		vma->vm_ops = obj->funcs->vm_ops;
+	else if (dev->driver->gem_vm_ops)
+		vma->vm_ops = dev->driver->gem_vm_ops;
+	else
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = dev->driver->gem_vm_ops;
 	vma->vm_private_data = obj;
 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
 	drm_printf_indent(p, indent, "imported=%s\n",
 			  obj->import_attach ? "yes" : "no");
 
-	if (obj->dev->driver->gem_print_info)
+	if (obj->funcs && obj->funcs->print_info)
+		obj->funcs->print_info(p, indent, obj);
+	else if (obj->dev->driver->gem_print_info)
 		obj->dev->driver->gem_print_info(p, indent, obj);
 }
+
+/**
+ * drm_gem_pin - Pin backing buffer in memory
+ * @obj: GEM object
+ *
+ * Make sure the backing buffer is pinned in memory.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+	if (obj->funcs && obj->funcs->pin)
+		return obj->funcs->pin(obj);
+	else if (obj->dev->driver->gem_prime_pin)
+		return obj->dev->driver->gem_prime_pin(obj);
+	else
+		return 0;
+}
+EXPORT_SYMBOL(drm_gem_pin);
+
+/**
+ * drm_gem_unpin - Unpin backing buffer from memory
+ * @obj: GEM object
+ *
+ * Relax the requirement that the backing buffer is pinned in memory.
+ */
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+	if (obj->funcs && obj->funcs->unpin)
+		obj->funcs->unpin(obj);
+	else if (obj->dev->driver->gem_prime_unpin)
+		obj->dev->driver->gem_prime_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_unpin);
+
+/**
+ * drm_gem_vmap - Map buffer into kernel virtual address space
+ * @obj: GEM object
+ *
+ * Returns:
+ * The kernel virtual address of the buffer mapping or an ERR_PTR-encoded
+ * negative error code on failure.
+ */
+void *drm_gem_vmap(struct drm_gem_object *obj)
+{
+	void *vaddr;
+
+	if (obj->funcs && obj->funcs->vmap)
+		vaddr = obj->funcs->vmap(obj);
+	else if (obj->dev->driver->gem_prime_vmap)
+		vaddr = obj->dev->driver->gem_prime_vmap(obj);
+	else
+		vaddr = ERR_PTR(-EOPNOTSUPP);
+
+	if (!vaddr)
+		vaddr = ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_vmap);
+
+/**
+ * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
+ * @obj: GEM object
+ * @vaddr: Virtual address (can be NULL)
+ */
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	if (!vaddr)
+		return;
+
+	if (obj->funcs && obj->funcs->vunmap)
+		obj->funcs->vunmap(obj, vaddr);
+	else if (obj->dev->driver->gem_prime_vunmap)
+		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_vunmap);
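The common thread in these hunks is that GEM callbacks can now live in a per-object struct drm_gem_object_funcs table, which is consulted before the driver-wide struct drm_driver hooks. A hedged sketch of how a driver opts in; the my_* names are placeholders:

	static const struct drm_gem_object_funcs my_gem_funcs = {
		.free = my_gem_free,	/* required once obj->funcs is set */
		.open = my_gem_open,	/* the rest are optional */
		.close = my_gem_close,
		.vmap = my_gem_vmap,
		.vunmap = my_gem_vunmap,
		.vm_ops = &my_gem_vm_ops,
	};

	/* at object creation time */
	obj->funcs = &my_gem_funcs;

Note that drm_gem_object_free() above calls obj->funcs->free() unconditionally whenever obj->funcs is set, so .free is effectively mandatory for objects using a funcs table.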
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d2ced8..cc26625 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,6 +176,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
  *
  * This function frees the backing memory of the CMA GEM object, cleans up the
  * GEM object state and frees the memory used to store the object itself.
+ * If the buffer was imported and has a virtual address set, that mapping is
+ * released as well.
  * Drivers using the CMA helpers should set this as their
  * &drm_driver.gem_free_object_unlocked callback.
  */
@@ -189,6 +190,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
 			    cma_obj->vaddr, cma_obj->paddr);
 	} else if (gem_obj->import_attach) {
+		if (cma_obj->vaddr)
+			dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
 		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
 	}
 
@@ -575,3 +578,86 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 	/* Nothing to do */
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
+
+static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
+	.free = drm_gem_cma_free_object,
+	.print_info = drm_gem_cma_print_info,
+	.get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.vmap = drm_gem_cma_prime_vmap,
+	.vm_ops = &drm_gem_cma_vm_ops,
+};
+
+/**
+ * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
+ *                                           default function table
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This sets the GEM object functions to the default CMA helper functions.
+ * This function can be used as the &drm_driver.gem_create_object callback.
+ *
+ * Returns:
+ * A pointer to a newly allocated GEM object, or NULL on failure.
+ */
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return NULL;
+
+	cma_obj->base.funcs = &drm_cma_gem_default_funcs;
+
+	return &cma_obj->base;
+}
+EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
+
+/**
+ * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ *	scatter/gather table and get the virtual address of the buffer
+ * @dev: DRM device
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table using
+ * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a CMA GEM object always has its virtual
+ * address set. This address is released when the object is freed.
+ *
+ * This function can be used as the &drm_driver.gem_prime_import_sg_table
+ * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
+ * the necessary DRM driver operations.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+				       struct dma_buf_attachment *attach,
+				       struct sg_table *sgt)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *obj;
+	void *vaddr;
+
+	vaddr = dma_buf_vmap(attach->dmabuf);
+	if (!vaddr) {
+		DRM_ERROR("Failed to vmap PRIME buffer\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR(obj)) {
+		dma_buf_vunmap(attach->dmabuf, vaddr);
+		return obj;
+	}
+
+	cma_obj = to_drm_gem_cma_obj(obj);
+	cma_obj->vaddr = vaddr;
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
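A hedged sketch of the driver wiring these two helpers enable; the kernel-doc above refers to a DRM_GEM_CMA_VMAP_DRIVER_OPS convenience macro, and the explicit form below spells out roughly what such a driver is expected to set (the exact field list is an assumption):

	static struct drm_driver my_driver = {
		/* ... */
		.gem_create_object = drm_cma_gem_create_object_default_funcs,
		.dumb_create = drm_gem_cma_dumb_create,
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap,
		.gem_prime_mmap = drm_gem_prime_mmap,
	};

With this in place every CMA object, imported or native, carries a valid vaddr, so drivers that render from the kernel (for example a shadow-buffer fbdev path) never need to vmap on demand.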
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index ded7a37..acb466d 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -171,7 +171,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
 		}
 
 		min_size = (height - 1) * mode_cmd->pitches[i]
-			 + width * info->cpp[i]
+			 + drm_format_info_min_pitch(info, i, width)
 			 + mode_cmd->offsets[i];
 
 		if (objs[i]->size < min_size) {
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
deleted file mode 100644
index 5799e27..0000000
--- a/drivers/gpu/drm/drm_global.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <drm/drm_global.h>
-
-struct drm_global_item {
-	struct mutex mutex;
-	void *object;
-	int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
-	int i;
-
-	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-		struct drm_global_item *item = &glob[i];
-		mutex_init(&item->mutex);
-		item->object = NULL;
-		item->refcount = 0;
-	}
-}
-
-void drm_global_release(void)
-{
-	int i;
-	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
-		struct drm_global_item *item = &glob[i];
-		BUG_ON(item->object != NULL);
-		BUG_ON(item->refcount != 0);
-	}
-}
-
-/**
- * drm_global_item_ref - Initialize and acquire reference to memory
- * object
- * @ref: Object for initialization
- *
- * This initializes a memory object, allocating memory and calling the
- * .init() hook. Further calls will increase the reference count for
- * that item.
- *
- * Returns:
- * Zero on success, non-zero otherwise.
- */
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
-	int ret = 0;
-	struct drm_global_item *item = &glob[ref->global_type];
-
-	mutex_lock(&item->mutex);
-	if (item->refcount == 0) {
-		ref->object = kzalloc(ref->size, GFP_KERNEL);
-		if (unlikely(ref->object == NULL)) {
-			ret = -ENOMEM;
-			goto error_unlock;
-		}
-		ret = ref->init(ref);
-		if (unlikely(ret != 0))
-			goto error_free;
-
-		item->object = ref->object;
-	} else {
-		ref->object = item->object;
-	}
-
-	++item->refcount;
-	mutex_unlock(&item->mutex);
-	return 0;
-
-error_free:
-	kfree(ref->object);
-	ref->object = NULL;
-error_unlock:
-	mutex_unlock(&item->mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-/**
- * drm_global_item_unref - Drop reference to memory
- * object
- * @ref: Object being removed
- *
- * Drop a reference to the memory object and eventually call the
- * release() hook.  The allocated object should be dropped in the
- * release() hook or before calling this function
- *
- */
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
-	struct drm_global_item *item = &glob[ref->global_type];
-
-	mutex_lock(&item->mutex);
-	BUG_ON(item->refcount == 0);
-	BUG_ON(ref->object != item->object);
-	if (--item->refcount == 0) {
-		ref->release(ref);
-		item->object = NULL;
-	}
-	mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
-
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
deleted file mode 100644
index 6b68e90..0000000
--- a/drivers/gpu/drm/drm_info.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_minor *minor = node->minor;
-	struct drm_device *dev = minor->dev;
-	struct drm_master *master;
-
-	mutex_lock(&dev->master_mutex);
-	master = dev->master;
-	seq_printf(m, "%s", dev->driver->name);
-	if (dev->dev)
-		seq_printf(m, " dev=%s", dev_name(dev->dev));
-	if (master && master->unique)
-		seq_printf(m, " master=%s", master->unique);
-	if (dev->unique)
-		seq_printf(m, " unique=%s", dev->unique);
-	seq_printf(m, "\n");
-	mutex_unlock(&dev->master_mutex);
-
-	return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_file *priv;
-	kuid_t uid;
-
-	seq_printf(m,
-		   "%20s %5s %3s master a %5s %10s\n",
-		   "command",
-		   "pid",
-		   "dev",
-		   "uid",
-		   "magic");
-
-	/* dev->filelist is sorted youngest first, but we want to present
-	 * oldest first (i.e. kernel, servers, clients), so walk backwardss.
-	 */
-	mutex_lock(&dev->filelist_mutex);
-	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
-		struct task_struct *task;
-
-		rcu_read_lock(); /* locks pid_task()->comm */
-		task = pid_task(priv->pid, PIDTYPE_PID);
-		uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
-		seq_printf(m, "%20s %5d %3d   %c    %c %5d %10u\n",
-			   task ? task->comm : "<unknown>",
-			   pid_vnr(priv->pid),
-			   priv->minor->index,
-			   drm_is_current_master(priv) ? 'y' : 'n',
-			   priv->authenticated ? 'y' : 'n',
-			   from_kuid_munged(seq_user_ns(m), uid),
-			   priv->magic);
-		rcu_read_unlock();
-	}
-	mutex_unlock(&dev->filelist_mutex);
-	return 0;
-}
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
-	struct drm_gem_object *obj = ptr;
-	struct seq_file *m = data;
-
-	seq_printf(m, "%6d %8zd %7d %8d\n",
-		   obj->name, obj->size,
-		   obj->handle_count,
-		   kref_read(&obj->refcount));
-	return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-
-	seq_printf(m, "  name     size handles refcount\n");
-
-	mutex_lock(&dev->object_name_lock);
-	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
-	mutex_unlock(&dev->object_name_lock);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 51e06de..d9caf20 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,11 +56,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
 
-/* drm_info.c */
-int drm_name_info(struct seq_file *m, void *data);
-int drm_clients_info(struct seq_file *m, void* data);
-int drm_gem_name_info(struct seq_file *m, void *data);
-
 /* drm_vblank.c */
 void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
 void drm_vblank_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index c61680a..99cba8e 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -39,7 +39,6 @@ struct drm_master *drm_lease_owner(struct drm_master *master)
 		master = master->lessor;
 	return master;
 }
-EXPORT_SYMBOL(drm_lease_owner);
 
 /**
  * _drm_find_lessee - find lessee by id (idr_mutex held)
@@ -117,7 +116,6 @@ bool _drm_lease_held(struct drm_file *file_priv, int id)
 
 	return _drm_lease_held_master(file_priv->master, id);
 }
-EXPORT_SYMBOL(_drm_lease_held);
 
 /**
  * drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
@@ -144,7 +142,6 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
 	mutex_unlock(&master->dev->mode_config.idr_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(drm_lease_held);
 
 /**
  * drm_lease_filter_crtcs - restricted crtc set to leased values (idr_mutex not held)
@@ -184,7 +181,6 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
 	mutex_unlock(&master->dev->mode_config.idr_mutex);
 	return crtcs_out;
 }
-EXPORT_SYMBOL(drm_lease_filter_crtcs);
 
 /*
  * drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
@@ -195,7 +191,7 @@ EXPORT_SYMBOL(drm_lease_filter_crtcs);
  * make sure all of the desired objects can be leased, atomically
  * leasing them to the new drmmaster.
  *
- * 	ERR_PTR(-EACCESS)	some other master holds the title to any object
+ * 	ERR_PTR(-EACCES)	some other master holds the title to any object
  * 	ERR_PTR(-ENOENT)	some object is not a valid DRM object for this device
  * 	ERR_PTR(-EBUSY)		some other lessee holds title to this object
  *	ERR_PTR(-EEXIST)	same object specified more than once in the provided list
@@ -357,9 +353,9 @@ void drm_lease_revoke(struct drm_master *top)
 }
 
 static int validate_lease(struct drm_device *dev,
-			  struct drm_file *lessor_priv,
 			  int object_count,
-			  struct drm_mode_object **objects)
+			  struct drm_mode_object **objects,
+			  bool universal_planes)
 {
 	int o;
 	int has_crtc = -1;
@@ -376,14 +372,14 @@ static int validate_lease(struct drm_device *dev,
 		if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
 			has_connector = o;
 
-		if (lessor_priv->universal_planes) {
+		if (universal_planes) {
 			if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
 				has_plane = o;
 		}
 	}
 	if (has_crtc == -1 || has_connector == -1)
 		return -EINVAL;
-	if (lessor_priv->universal_planes && has_plane == -1)
+	if (universal_planes && has_plane == -1)
 		return -EINVAL;
 	return 0;
 }
@@ -397,6 +393,8 @@ static int fill_object_idr(struct drm_device *dev,
 	struct drm_mode_object **objects;
 	u32 o;
 	int ret;
+	bool universal_planes = READ_ONCE(lessor_priv->universal_planes);
+
 	objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
 			  GFP_KERNEL);
 	if (!objects)
@@ -419,14 +417,17 @@ static int fill_object_idr(struct drm_device *dev,
 		}
 
 		if (!drm_mode_object_lease_required(objects[o]->type)) {
+			DRM_DEBUG_KMS("invalid object for lease\n");
 			ret = -EINVAL;
 			goto out_free_objects;
 		}
 	}
 
-	ret = validate_lease(dev, lessor_priv, object_count, objects);
-	if (ret)
+	ret = validate_lease(dev, object_count, objects, universal_planes);
+	if (ret) {
+		DRM_DEBUG_LEASE("lease validation failed\n");
 		goto out_free_objects;
+	}
 
 	/* add their IDs to the lease request - taking into account
 	   universal planes */
@@ -449,7 +450,7 @@ static int fill_object_idr(struct drm_device *dev,
 					object_id, ret);
 			goto out_free_objects;
 		}
-		if (obj->type == DRM_MODE_OBJECT_CRTC && !lessor_priv->universal_planes) {
+		if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
 			struct drm_crtc *crtc = obj_to_crtc(obj);
 			ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
 			if (ret < 0) {
@@ -509,15 +510,21 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 		return -EOPNOTSUPP;
 
 	/* Do not allow sub-leases */
-	if (lessor->lessor)
+	if (lessor->lessor) {
+		DRM_DEBUG_LEASE("recursive leasing not allowed\n");
 		return -EINVAL;
+	}
 
 	/* need some objects */
-	if (cl->object_count == 0)
+	if (cl->object_count == 0) {
+		DRM_DEBUG_LEASE("no objects in lease\n");
 		return -EINVAL;
+	}
 
-	if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK)))
+	if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
+		DRM_DEBUG_LEASE("invalid flags\n");
 		return -EINVAL;
+	}
 
 	object_count = cl->object_count;
 
@@ -532,6 +539,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 			      object_count, object_ids);
 	kfree(object_ids);
 	if (ret) {
+		DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
 		idr_destroy(&leases);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index d69e4fc..40c4349 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -51,7 +51,7 @@
 #endif
 
 static void *agp_remap(unsigned long offset, unsigned long size,
-		       struct drm_device * dev)
+		       struct drm_device *dev)
 {
 	unsigned long i, num_pages =
 	    PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,26 +94,26 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 }
 
 /** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory * handle, int pages)
+void drm_free_agp(struct agp_memory *handle, int pages)
 {
 	agp_free_memory(handle);
 }
 
 /** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory *handle, unsigned int start)
 {
 	return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory * handle)
+int drm_unbind_agp(struct agp_memory *handle)
 {
 	return agp_unbind_memory(handle);
 }
 
 #else /*  CONFIG_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      struct drm_device * dev)
+			      struct drm_device *dev)
 {
 	return NULL;
 }
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index ee80788..703bfce 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,6 +297,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
 		return -ENOMEM;
 	dev->mode_config.prop_crtc_id = prop;
 
+	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
+				   0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_fb_damage_clips = prop;
+
 	prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
 			"ACTIVE");
 	if (!prop)
@@ -310,6 +316,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
 		return -ENOMEM;
 	dev->mode_config.prop_mode_id = prop;
 
+	prop = drm_property_create_bool(dev, 0,
+			"VRR_ENABLED");
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_vrr_enabled = prop;
+
 	prop = drm_property_create(dev,
 			DRM_MODE_PROP_BLOB,
 			"DEGAMMA_LUT", 0);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index be8b754..cd9bc0c 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -38,7 +38,8 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
 	int ret;
 
 	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL,
+			1, 0, GFP_KERNEL);
 	if (ret >= 0) {
 		/*
 		 * Set up the object linking under the protection of the idr
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 02db9ac..24a7504 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -716,8 +716,8 @@ int of_get_drm_display_mode(struct device_node *np,
 	if (bus_flags)
 		drm_bus_flags_from_videomode(&vm, bus_flags);
 
-	pr_debug("%pOF: got %dx%d display mode from %s\n",
-		np, vm.hactive, vm.vactive, np->name);
+	pr_debug("%pOF: got %dx%d display mode\n",
+		np, vm.hactive, vm.vactive);
 	drm_mode_debug_printmodeline(dmode);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index f1c24ab..9150fa3 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -146,6 +146,21 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
  * Initialize a CRTC object with a default helper-provided primary plane and no
  * cursor plane.
  *
+ * Note that we make some assumptions about hardware limitations that may not be
+ * true for all hardware:
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ * 5. The primary plane must always be on if the CRTC is enabled.
+ *
+ * This is purely a backwards compatibility helper for old drivers. Drivers
+ * should instead implement their own primary plane. Atomic drivers must do so.
+ * Drivers with the above hardware restrictions can look into using &struct
+ * drm_simple_display_pipe, which encapsulates the above limitations into a nice
+ * interface.
+ *
  * Returns:
  * Zero on success, error code on failure.
  */
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8a51006..51f534d 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -56,6 +56,10 @@
  *     drm_modeset_drop_locks(ctx);
  *     drm_modeset_acquire_fini(ctx);
  *
+ * For convenience this control flow is implemented in
+ * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
+ * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
+ *
  * If all that is needed is a single modeset lock, then the &struct
  * drm_modeset_acquire_ctx is not needed and the locking can be simplified
  * by passing a NULL instead of ctx in the drm_modeset_lock() call or
@@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock);
  * Locks acquired with this function should be released by calling the
  * drm_modeset_drop_locks() function on @ctx.
  *
+ * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
+ *
  * Returns: 0 on success or a negative error-code on failure.
  */
 int drm_modeset_lock_all_ctx(struct drm_device *dev,
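A minimal usage sketch of the macro pair mentioned above; the conversion of setplane_internal() later in this patch shows the same pattern in tree:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/*
	 * Everything here runs with all modeset locks held and is
	 * transparently retried on -EDEADLK contention.
	 */

	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	return ret;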
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ee4a5e1..52e445b 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -59,6 +59,14 @@ static const struct drm_dmi_panel_orientation_data gpd_win = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data gpd_win2 = {
+	.width = 720,
+	.height = 1280,
+	.bios_dates = (const char * const []){
+		"12/07/2017", "05/24/2018", "06/29/2018", NULL },
+	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
 	.width = 800,
 	.height = 1280,
@@ -106,6 +114,14 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
 		},
 		.driver_data = (void *)&gpd_win,
+	}, {	/* GPD Win 2 (too generic strings, also match on bios date) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+		},
+		.driver_data = (void *)&gpd_win2,
 	}, {	/* I.T.Works TW891 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 48f615d..a9d9df6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,15 +61,14 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 		return NULL;
 
 	dmah->size = size;
-	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+	dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
+						GFP_KERNEL | __GFP_COMP);
 
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
 		return NULL;
 	}
 
-	memset(dmah->vaddr, 0, size);
-
 	/* XXX - Is virt_to_page() legal for consistent mem? */
 	/* Reserve */
 	for (addr = (unsigned long)dmah->vaddr, sz = size;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 1fa98bd..5f650d8 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -636,6 +636,29 @@ static int __setplane_check(struct drm_plane *plane,
 	return 0;
 }
 
+/**
+ * drm_any_plane_has_format - Check whether any plane supports this format and modifier combination
+ * @dev: DRM device
+ * @format: pixel format (DRM_FORMAT_*)
+ * @modifier: data layout modifier
+ *
+ * Returns:
+ * Whether at least one plane supports the specified format and modifier combination.
+ */
+bool drm_any_plane_has_format(struct drm_device *dev,
+			      u32 format, u64 modifier)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, dev) {
+		if (drm_plane_check_pixel_format(plane, format, modifier) == 0)
+			return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_any_plane_has_format);
+
 /*
  * __setplane_internal - setplane handler for internal callers
  *
@@ -744,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane,
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
-	ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
-	if (ret)
-		goto fail;
+	DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
+				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 
 	if (drm_drv_uses_atomic_modeset(plane->dev))
 		ret = __setplane_atomic(plane, crtc, fb,
@@ -759,14 +779,7 @@ static int setplane_internal(struct drm_plane *plane,
 					  crtc_x, crtc_y, crtc_w, crtc_h,
 					  src_x, src_y, src_w, src_h, &ctx);
 
-fail:
-	if (ret == -EDEADLK) {
-		ret = drm_modeset_backoff(&ctx);
-		if (!ret)
-			goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 
 	return ret;
 }
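drm_any_plane_has_format() above is intended for validation paths such as framebuffer creation, where a format/modifier pair that no plane can scan out should be rejected early. A hedged sketch of a call site:

	/* hypothetical addfb-style check */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("no plane supports this format/modifier\n");
		return -EINVAL;
	}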
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index a393756..0fff72d 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -42,11 +42,8 @@
  * primary plane support on top of the normal CRTC configuration interface.
  * Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
  * plane together with the CRTC state this does not allow userspace to disable
- * the primary plane itself.  To avoid too much duplicated code use
- * drm_plane_helper_check_update() which can be used to enforce the same
- * restrictions as primary planes had thus. The default primary plane only
- * expose XRBG8888 and ARGB8888 as valid pixel formats for the attached
- * framebuffer.
+ * the primary plane itself. The default primary plane only exposes XRGB8888 and
+ * ARGB8888 as valid pixel formats for the attached framebuffer.
  *
  * Drivers are highly recommended to implement proper support for primary
  * planes, and newly merged drivers must not rely upon these transitional
@@ -100,43 +97,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
 	return count;
 }
 
-/**
- * drm_plane_helper_check_update() - Check plane update for validity
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @src: source coordinates in 16.16 fixed point
- * @dst: integer destination coordinates
- * @rotation: plane rotation
- * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
- * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
- * @can_position: is it legal to position the plane such that it
- *                doesn't cover the entire crtc?  This will generally
- *                only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
- *                       is disabled?
- * @visible: output parameter indicating whether plane is still visible after
- *           clipping
- *
- * Checks that a desired plane update is valid.  Drivers that provide
- * their own plane handling rather than helper-provided implementations may
- * still wish to call this function to avoid duplication of error checking
- * code.
- *
- * RETURNS:
- * Zero if update appears valid, error code on failure
- */
-int drm_plane_helper_check_update(struct drm_plane *plane,
-				  struct drm_crtc *crtc,
-				  struct drm_framebuffer *fb,
-				  struct drm_rect *src,
-				  struct drm_rect *dst,
-				  unsigned int rotation,
-				  int min_scale,
-				  int max_scale,
-				  bool can_position,
-				  bool can_update_disabled,
-				  bool *visible)
+static int drm_plane_helper_check_update(struct drm_plane *plane,
+					 struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 struct drm_rect *src,
+					 struct drm_rect *dst,
+					 unsigned int rotation,
+					 int min_scale,
+					 int max_scale,
+					 bool can_position,
+					 bool can_update_disabled,
+					 bool *visible)
 {
 	struct drm_plane_state plane_state = {
 		.plane = plane,
@@ -173,52 +144,14 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
 
 	return 0;
 }
-EXPORT_SYMBOL(drm_plane_helper_check_update);
 
-/**
- * drm_primary_helper_update() - Helper for primary plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler for primary planes.  This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * non-NULL framebuffer.  We call the driver's modeset handler to update the
- * framebuffer.
- *
- * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
- * return an error.
- *
- * Note that we make some assumptions about hardware limitations that may not be
- * true for all hardware --
- *
- * 1. Primary plane cannot be repositioned.
- * 2. Primary plane cannot be scaled.
- * 3. Primary plane must cover the entire CRTC.
- * 4. Subpixel positioning is not supported.
- *
- * Drivers for hardware that don't have these restrictions can provide their
- * own implementation rather than using this helper.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
-			      struct drm_framebuffer *fb,
-			      int crtc_x, int crtc_y,
-			      unsigned int crtc_w, unsigned int crtc_h,
-			      uint32_t src_x, uint32_t src_y,
-			      uint32_t src_w, uint32_t src_h,
-			      struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int crtc_x, int crtc_y,
+				     unsigned int crtc_w, unsigned int crtc_h,
+				     uint32_t src_x, uint32_t src_y,
+				     uint32_t src_w, uint32_t src_h,
+				     struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_mode_set set = {
 		.crtc = crtc,
@@ -285,35 +218,12 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	kfree(connector_list);
 	return ret;
 }
-EXPORT_SYMBOL(drm_primary_helper_update);
 
-/**
- * drm_primary_helper_disable() - Helper for primary plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler for primary planes.  This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * NULL framebuffer parameter.  It unconditionally fails the disable call with
- * -EINVAL the only way to disable the primary plane without driver support is
- * to disable the entire CRTC. Which does not match the plane
- * &drm_plane_funcs.disable_plane hook.
- *
- * Note that some hardware may be able to disable the primary plane without
- * disabling the whole CRTC.  Drivers for such hardware should provide their
- * own disable handler that disables just the primary plane (and they'll likely
- * need to provide their own update handler as well to properly re-enable a
- * disabled primary plane).
- *
- * RETURNS:
- * Unconditionally returns -EINVAL.
- */
-int drm_primary_helper_disable(struct drm_plane *plane,
-			       struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_disable(struct drm_plane *plane,
+				      struct drm_modeset_acquire_ctx *ctx)
 {
 	return -EINVAL;
 }
-EXPORT_SYMBOL(drm_primary_helper_disable);
 
 /**
  * drm_primary_helper_destroy() - Helper for primary plane destruction
@@ -336,200 +246,3 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
 	.destroy = drm_primary_helper_destroy,
 };
 EXPORT_SYMBOL(drm_primary_helper_funcs);
-
-int drm_plane_helper_commit(struct drm_plane *plane,
-			    struct drm_plane_state *plane_state,
-			    struct drm_framebuffer *old_fb)
-{
-	const struct drm_plane_helper_funcs *plane_funcs;
-	struct drm_crtc *crtc[2];
-	const struct drm_crtc_helper_funcs *crtc_funcs[2];
-	int i, ret = 0;
-
-	plane_funcs = plane->helper_private;
-
-	/* Since this is a transitional helper we can't assume that plane->state
-	 * is always valid. Hence we need to use plane->crtc instead of
-	 * plane->state->crtc as the old crtc. */
-	crtc[0] = plane->crtc;
-	crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
-
-	for (i = 0; i < 2; i++)
-		crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
-
-	if (plane_funcs->atomic_check) {
-		ret = plane_funcs->atomic_check(plane, plane_state);
-		if (ret)
-			goto out;
-	}
-
-	if (plane_funcs->prepare_fb && plane_state->fb != old_fb) {
-		ret = plane_funcs->prepare_fb(plane,
-					      plane_state);
-		if (ret)
-			goto out;
-	}
-
-	/* Point of no return, commit sw state. */
-	swap(plane->state, plane_state);
-
-	for (i = 0; i < 2; i++) {
-		if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
-			crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
-	}
-
-	/*
-	 * Drivers may optionally implement the ->atomic_disable callback, so
-	 * special-case that here.
-	 */
-	if (drm_atomic_plane_disabling(plane_state, plane->state) &&
-	    plane_funcs->atomic_disable)
-		plane_funcs->atomic_disable(plane, plane_state);
-	else
-		plane_funcs->atomic_update(plane, plane_state);
-
-	for (i = 0; i < 2; i++) {
-		if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
-			crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
-	}
-
-	/*
-	 * If we only moved the plane and didn't change fb's, there's no need to
-	 * wait for vblank.
-	 */
-	if (plane->state->fb == old_fb)
-		goto out;
-
-	for (i = 0; i < 2; i++) {
-		if (!crtc[i])
-			continue;
-
-		if (crtc[i]->cursor == plane)
-			continue;
-
-		/* There's no other way to figure out whether the crtc is running. */
-		ret = drm_crtc_vblank_get(crtc[i]);
-		if (ret == 0) {
-			drm_crtc_wait_one_vblank(crtc[i]);
-			drm_crtc_vblank_put(crtc[i]);
-		}
-
-		ret = 0;
-	}
-
-	if (plane_funcs->cleanup_fb)
-		plane_funcs->cleanup_fb(plane, plane_state);
-out:
-	if (plane->funcs->atomic_destroy_state)
-		plane->funcs->atomic_destroy_state(plane, plane_state);
-	else
-		drm_atomic_helper_plane_destroy_state(plane, plane_state);
-
-	return ret;
-}
-
-/**
- * drm_plane_helper_update() - Transitional helper for plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
-			    struct drm_framebuffer *fb,
-			    int crtc_x, int crtc_y,
-			    unsigned int crtc_w, unsigned int crtc_h,
-			    uint32_t src_x, uint32_t src_y,
-			    uint32_t src_w, uint32_t src_h,
-			    struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_plane_state *plane_state;
-
-	if (plane->funcs->atomic_duplicate_state)
-		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else {
-		if (!plane->state)
-			drm_atomic_helper_plane_reset(plane);
-
-		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	}
-	if (!plane_state)
-		return -ENOMEM;
-	plane_state->plane = plane;
-
-	plane_state->crtc = crtc;
-	drm_atomic_set_fb_for_plane(plane_state, fb);
-	plane_state->crtc_x = crtc_x;
-	plane_state->crtc_y = crtc_y;
-	plane_state->crtc_h = crtc_h;
-	plane_state->crtc_w = crtc_w;
-	plane_state->src_x = src_x;
-	plane_state->src_y = src_y;
-	plane_state->src_h = src_h;
-	plane_state->src_w = src_w;
-
-	return drm_plane_helper_commit(plane, plane_state, plane->fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_update);
-
-/**
- * drm_plane_helper_disable() - Transitional helper for plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_disable(struct drm_plane *plane,
-			     struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_plane_state *plane_state;
-	struct drm_framebuffer *old_fb;
-
-	/* crtc helpers love to call disable functions for already disabled
-	 * hw. So cope with that. */
-	if (!plane->crtc)
-		return 0;
-
-	if (plane->funcs->atomic_duplicate_state)
-		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else {
-		if (!plane->state)
-			drm_atomic_helper_plane_reset(plane);
-
-		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	}
-	if (!plane_state)
-		return -ENOMEM;
-	plane_state->plane = plane;
-
-	plane_state->crtc = NULL;
-	old_fb = plane_state->fb;
-	drm_atomic_set_fb_for_plane(plane_state, NULL);
-
-	return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_disable);
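
With these transitional helpers deleted, the last non-atomic users are gone; a
converted driver gets the same userspace-facing behaviour by wiring its planes
directly to the atomic helpers. A minimal sketch (hypothetical "foo" driver;
all referenced helpers are the standard atomic ones):

	static const struct drm_plane_funcs foo_plane_funcs = {
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		.destroy		= drm_plane_cleanup,
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};
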
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 3f0205f..231e3f6d 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
 {
 	struct drm_prime_attachment *prime_attach;
 	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
 
 	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
 	if (!prime_attach)
@@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
 	prime_attach->dir = DMA_NONE;
 	attach->priv = prime_attach;
 
-	if (!dev->driver->gem_prime_pin)
-		return 0;
-
-	return dev->driver->gem_prime_pin(obj);
+	return drm_gem_pin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_attach);
 
@@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
 {
 	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
 
 	if (prime_attach) {
 		struct sg_table *sgt = prime_attach->sgt;
@@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
 		attach->priv = NULL;
 	}
 
-	if (dev->driver->gem_prime_unpin)
-		dev->driver->gem_prime_unpin(obj);
+	drm_gem_unpin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_detach);
 
@@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 	if (WARN_ON(prime_attach->dir != DMA_NONE))
 		return ERR_PTR(-EBUSY);
 
-	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+	if (obj->funcs)
+		sgt = obj->funcs->get_sg_table(obj);
+	else
+		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
 	if (!IS_ERR(sgt)) {
 		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
@@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
+	void *vaddr;
 
-	if (dev->driver->gem_prime_vmap)
-		return dev->driver->gem_prime_vmap(obj);
-	else
-		return NULL;
+	vaddr = drm_gem_vmap(obj);
+	if (IS_ERR(vaddr))
+		vaddr = NULL;
+
+	return vaddr;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 
@@ -426,42 +424,12 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
 
-	if (dev->driver->gem_prime_vunmap)
-		dev->driver->gem_prime_vunmap(obj, vaddr);
+	drm_gem_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-			   void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
-/**
  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
  * @dma_buf: buffer to be mapped
  * @vma: virtual address range
@@ -489,8 +457,6 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.map = drm_gem_dmabuf_kmap,
-	.unmap = drm_gem_dmabuf_kunmap,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
@@ -559,7 +525,12 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
 		return dmabuf;
 	}
 
-	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	if (obj->funcs && obj->funcs->export)
+		dmabuf = obj->funcs->export(obj, flags);
+	else if (dev->driver->gem_prime_export)
+		dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	else
+		dmabuf = drm_gem_prime_export(dev, obj, flags);
 	if (IS_ERR(dmabuf)) {
 		/* normally the created dma-buf takes ownership of the ref,
 		 * but if that fails then drop the ref
@@ -679,6 +650,52 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
 /**
+ * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
+ * @obj: GEM object
+ * @vma: Virtual address range
+ *
+ * This function sets up a userspace mapping for PRIME exported buffers using
+ * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
+ * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
+ * called to set up the mapping.
+ *
+ * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
+ */
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct drm_file *priv;
+	struct file *fil;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
+	if (!priv || !fil) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Used by drm_gem_mmap() to lookup the GEM object */
+	priv->minor = obj->dev->primary;
+	fil->private_data = priv;
+
+	ret = drm_vma_node_allow(&obj->vma_node, priv);
+	if (ret)
+		goto out;
+
+	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+	ret = obj->dev->driver->fops->mmap(fil, vma);
+
+	drm_vma_node_revoke(&obj->vma_node, priv);
+out:
+	kfree(priv);
+	kfree(fil);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_mmap);
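
As the kerneldoc above says, a driver can point its &drm_driver.gem_prime_mmap
at this helper and let its regular fops->mmap do the actual work. A minimal
sketch (hypothetical "foo" driver and foo_fops):

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_mmap		= drm_gem_prime_mmap,
		.fops			= &foo_fops,	/* its .mmap sets up the mapping */
	};
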
+
+/**
  * drm_gem_prime_import_dev - core implementation of the import callback
  * @dev: drm_device to import into
  * @dma_buf: dma-buf object to import
@@ -792,7 +809,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
 	/* never seen this one, need to import */
 	mutex_lock(&dev->object_name_lock);
-	obj = dev->driver->gem_prime_import(dev, dma_buf);
+	if (dev->driver->gem_prime_import)
+		obj = dev->driver->gem_prime_import(dev, dma_buf);
+	else
+		obj = drm_gem_prime_import(dev, dma_buf);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
 		goto out_unlock;
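
The common thread in this file is that PRIME now consults the new per-object
&drm_gem_object_funcs before the driver-wide callbacks, which in turn fall
back to the generic import/export implementations. A hypothetical driver
opting in per object might set up:

	static const struct drm_gem_object_funcs foo_gem_funcs = {
		.free		= foo_gem_free,		/* assumed driver hooks */
		.export		= foo_gem_export,	/* tried before driver->gem_prime_export */
		.get_sg_table	= foo_gem_get_sg_table,	/* tried before driver->gem_prime_get_sg_table */
	};

	/* at object creation time: */
	obj->funcs = &foo_gem_funcs;
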
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 51fa978..9178124 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -190,6 +190,13 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
 	pipe->funcs->cleanup_fb(pipe, state);
 }
 
+static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
+						uint32_t format,
+						uint64_t modifier)
+{
+	return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
 static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
 	.prepare_fb = drm_simple_kms_plane_prepare_fb,
 	.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
@@ -204,6 +211,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
 	.reset			= drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+	.format_mod_supported   = drm_simple_kms_format_mod_supported,
 };
 
 /**
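
The new callback makes the previously implicit assumption explicit: simple-KMS
planes only accept linear buffers. For contrast, a driver with real tiling
support would whitelist its vendor modifiers as well; a hedged sketch, with
DRM_FORMAT_MOD_FOO_TILED standing in for a real vendor modifier:

	static bool foo_format_mod_supported(struct drm_plane *plane,
					     uint32_t format, uint64_t modifier)
	{
		if (modifier == DRM_FORMAT_MOD_LINEAR)
			return true;	/* linear always works */

		/* tiled scanout, but (say) not for YUV formats */
		return modifier == DRM_FORMAT_MOD_FOO_TILED &&
		       format != DRM_FORMAT_NV12;
	}
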
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5c2091d..db30a0e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,22 +56,6 @@
 #include "drm_internal.h"
 #include <drm/drm_syncobj.h>
 
-struct drm_syncobj_stub_fence {
-	struct dma_fence base;
-	spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
-        return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
-	.get_driver_name = drm_syncobj_stub_fence_get_name,
-	.get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
-
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -113,8 +97,6 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
 	int ret;
 
-	WARN_ON(*fence);
-
 	*fence = drm_syncobj_fence_get(syncobj);
 	if (*fence)
 		return 1;
@@ -158,13 +140,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
 /**
  * drm_syncobj_replace_fence - replace fence in a sync object.
  * @syncobj: Sync object to replace fence in
- * @point: timeline point
  * @fence: fence to install in sync file.
  *
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
  */
 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
-			       u64 point,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
@@ -192,23 +172,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 }
 EXPORT_SYMBOL(drm_syncobj_replace_fence);
 
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+/**
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
+ *
+ * Assign an already signaled stub fence to the sync object.
+ */
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-	struct drm_syncobj_stub_fence *fence;
-	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (fence == NULL)
-		return -ENOMEM;
+	struct dma_fence *fence = dma_fence_get_stub();
 
-	spin_lock_init(&fence->lock);
-	dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
-		       &fence->lock, 0, 0);
-	dma_fence_signal(&fence->base);
-
-	drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
-	dma_fence_put(&fence->base);
-
-	return 0;
+	drm_syncobj_replace_fence(syncobj, fence);
+	dma_fence_put(fence);
 }
 
 /**
@@ -216,6 +191,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
  * @file_private: drm file private pointer
  * @handle: sync object handle to lookup.
  * @point: timeline point
+ * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or 0
  * @fence: out parameter for the fence
  *
  * This is just a convenience function that combines drm_syncobj_find() and
@@ -226,7 +202,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
  * dma_fence_put().
  */
 int drm_syncobj_find_fence(struct drm_file *file_private,
-			   u32 handle, u64 point,
+			   u32 handle, u64 point, u64 flags,
 			   struct dma_fence **fence)
 {
 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
@@ -255,7 +231,7 @@ void drm_syncobj_free(struct kref *kref)
 	struct drm_syncobj *syncobj = container_of(kref,
 						   struct drm_syncobj,
 						   refcount);
-	drm_syncobj_replace_fence(syncobj, 0, NULL);
+	drm_syncobj_replace_fence(syncobj, NULL);
 	kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -275,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 		       struct dma_fence *fence)
 {
-	int ret;
 	struct drm_syncobj *syncobj;
 
 	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -286,16 +261,11 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 	INIT_LIST_HEAD(&syncobj->cb_list);
 	spin_lock_init(&syncobj->lock);
 
-	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
-		ret = drm_syncobj_assign_null_handle(syncobj);
-		if (ret < 0) {
-			drm_syncobj_put(syncobj);
-			return ret;
-		}
-	}
+	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+		drm_syncobj_assign_null_handle(syncobj);
 
 	if (fence)
-		drm_syncobj_replace_fence(syncobj, 0, fence);
+		drm_syncobj_replace_fence(syncobj, fence);
 
 	*out_syncobj = syncobj;
 	return 0;
@@ -480,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
 		return -ENOENT;
 	}
 
-	drm_syncobj_replace_fence(syncobj, 0, fence);
+	drm_syncobj_replace_fence(syncobj, fence);
 	dma_fence_put(fence);
 	drm_syncobj_put(syncobj);
 	return 0;
@@ -497,7 +467,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
 	if (fd < 0)
 		return fd;
 
-	ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
+	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
 	if (ret)
 		goto err_put_fd;
 
@@ -719,9 +689,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
 		for (i = 0; i < count; ++i) {
-			if (entries[i].fence)
-				continue;
-
 			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
 							      &entries[i].fence,
 							      &entries[i].syncobj_cb,
@@ -954,7 +921,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	for (i = 0; i < args->count_handles; i++)
-		drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
+		drm_syncobj_replace_fence(syncobjs[i], NULL);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
@@ -986,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < args->count_handles; i++) {
-		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
-		if (ret < 0)
-			break;
-	}
+	for (i = 0; i < args->count_handles; i++)
+		drm_syncobj_assign_null_handle(syncobjs[i]);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
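
dma_fence_get_stub() is the new core replacement for the per-file stub fence
deleted above: it returns a reference to a single, shared, already-signaled
fence, which is why drm_syncobj_assign_null_handle() can no longer fail.
Roughly:

	struct dma_fence *fence = dma_fence_get_stub();

	WARN_ON(!dma_fence_is_signaled(fence));	/* the stub is always signaled */
	dma_fence_put(fence);			/* only drops the reference */
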
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 83c1f46..52802e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -550,7 +550,7 @@ static int etnaviv_bind(struct device *dev)
 out_bind:
 	kfree(priv);
 out_unref:
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return ret;
 }
@@ -567,7 +567,7 @@ static void etnaviv_unbind(struct device *dev)
 	drm->dev_private = NULL;
 	kfree(priv);
 
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 }
 
 static const struct component_master_ops etnaviv_master_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9146e30..3fbb485 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 	unsigned int n_obj, n_bomap_pages;
 	size_t file_size, mmu_size;
 	__le64 *bomap, *bomap_start;
+	unsigned long flags;
 
 	/* Only catch the first event, or when manually re-armed */
 	if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 		    mmu_size + gpu->buffer.size;
 
 	/* Add in the active command buffers */
-	spin_lock(&gpu->sched.job_list_lock);
+	spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
 	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
 		submit = to_etnaviv_submit(s_job);
 		file_size += submit->cmdbuf.size;
 		n_obj++;
 	}
-	spin_unlock(&gpu->sched.job_list_lock);
+	spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
 
 	/* Add in the active buffer objects */
 	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 			      gpu->buffer.size,
 			      etnaviv_cmdbuf_get_va(&gpu->buffer));
 
-	spin_lock(&gpu->sched.job_list_lock);
+	spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
 	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
 		submit = to_etnaviv_submit(s_job);
 		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
 				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
 				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
 	}
-	spin_unlock(&gpu->sched.job_list_lock);
+	spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
 
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 983e67f..30875f8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,7 +179,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		struct reservation_object *robj = bo->obj->resv;
 
 		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-			ret = reservation_object_reserve_shared(robj);
+			ret = reservation_object_reserve_shared(robj, 1);
 			if (ret)
 				return ret;
 		}
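
reservation_object_reserve_shared() grew a count parameter this cycle so
callers can reserve slots for several shared fences up front, e.g. one per
engine, before entering contexts where allocation is awkward. etnaviv needs
only one slot; a multi-engine submit might look like this sketch (fence_a and
fence_b are assumed per-engine fences):

	ret = reservation_object_reserve_shared(robj, 2);
	if (ret)
		return ret;

	reservation_object_add_shared_fence(robj, fence_a);
	reservation_object_add_shared_fence(robj, fence_b);
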
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 9b47636..49a6763 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	change = dma_addr - gpu->hangcheck_dma_addr;
 	if (change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
-		schedule_delayed_work(&sched_job->sched->work_tdr,
-				      sched_job->sched->timeout);
 		return;
 	}
 
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	etnaviv_submit_put(submit);
 }
 
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
 	if (submit->out_fence_id < 0) {
+		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
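
Both added calls follow the new scheduler rule: every successful
drm_sched_job_init() must be balanced by drm_sched_job_cleanup(), either from
the free_job callback once the job has run or explicitly on an error path
before the job was pushed. Schematically, in the etnaviv push path:

	ret = drm_sched_job_init(&submit->sched_job, sched_entity, submit->ctx);
	if (ret)
		goto out_unlock;

	/* ... if later setup fails before pushing the job: */
	drm_sched_job_cleanup(&submit->sched_job);
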
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27..3691a14 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@
 
 if DRM_EXYNOS
 
-config DRM_EXYNOS_IOMMU
-	bool
-	depends on EXYNOS_IOMMU
-	default y
-
 comment "CRTCs"
 
 config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146b..2fd2f3e 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
-		exynos_drm_gem.o exynos_drm_plane.o
+		exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
 
 exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)	+= exynos5433_drm_decon.o
 exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON)	+= exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index aef487d..5b4e0e8 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 #include "regs-decon5433.h"
 
 #define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
 	[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
 };
 
+static const unsigned int capabilities[WINDOWS_NR] = {
+	0,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
 static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
 				  u32 val)
 {
@@ -252,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+				unsigned int alpha, unsigned int pixel_alpha)
+{
+	u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+	u32 val = 0;
+
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+	case DRM_MODE_BLEND_COVERAGE:
+		val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+		val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		break;
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+			val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+			val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		} else {
+			val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+			val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+		}
+		break;
+	}
+	decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+				 unsigned int alpha, unsigned int pixel_alpha)
+{
+	u32 win_alpha = alpha >> 8;
+	u32 val = 0;
+
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+		break;
+	case DRM_MODE_BLEND_COVERAGE:
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		val |= WINCONx_ALPHA_SEL_F;
+		val |= WINCONx_BLD_PIX_F;
+		val |= WINCONx_ALPHA_MUL_F;
+		break;
+	}
+	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+	if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+		val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+		      VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+		      VIDOSD_Wx_ALPHA_B_F(win_alpha);
+		decon_set_bits(ctx, DECON_VIDOSDxC(win),
+			       VIDOSDxC_ALPHA0_RGB_MASK, val);
+		decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+	}
+}
+
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 				 struct drm_framebuffer *fb)
 {
+	struct exynos_drm_plane plane = ctx->planes[win];
+	struct exynos_drm_plane_state *state =
+		to_exynos_plane_state(plane.base.state);
+	unsigned int alpha = state->base.alpha;
+	unsigned int pixel_alpha;
 	unsigned long val;
 
+	if (fb->format->has_alpha)
+		pixel_alpha = state->base.pixel_blend_mode;
+	else
+		pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val &= WINCONx_ENWIN_F;
 
@@ -279,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 	case DRM_FORMAT_ARGB8888:
 	default:
 		val |= WINCONx_BPPMODE_32BPP_A8888;
-		val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+		val |= WINCONx_WSWP_F;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
 	}
@@ -298,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
+	decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
 
-	writel(val, ctx->addr + DECON_WINCONx(win));
+	if (win > 0) {
+		decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+		decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+	}
 }
 
 static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -552,6 +628,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
 		ctx->configs[win].zpos = win - ctx->first_win;
 		ctx->configs[win].type = decon_win_types[win];
+		ctx->configs[win].capabilities = capabilities[win];
 
 		ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
 					&ctx->configs[win]);
@@ -569,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 
 	decon_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev);
 }
 
 static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -579,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
 	decon_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 }
 
 static const struct component_ops decon_component_ops = {
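
Reading the BLENDERQ programming back into blend equations (src is this
window, dst the composition of the windows below it, a the per-pixel alpha):

	coverage:			out = a * src + (1 - a) * dst
	premultiplied, opaque window:	out = 1 * src + (1 - a) * dst
	premultiplied, window alpha:	out = ALPHA0 * src + (1 - a) * dst

where ALPHA0 is the constant window alpha that decon_win_set_bldmod() writes
to VIDOSDxC. DRM_MODE_BLEND_PIXEL_NONE shares the coverage equation in
set_bldeq, but set_bldmod clears WINCONx_BLD_PIX_F for it, so the per-pixel
alpha never takes effect.
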
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd00..381aa3d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
 #include "regs-decon7.h"
 
 /*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 
 	decon_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, ctx->dev);
+	return exynos_drm_register_dma(drm_dev, ctx->dev);
 }
 
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 0000000..3432c5e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...)	({ NULL; })
+#define arm_iommu_attach_device(...)	({ -ENODEV; })
+#define arm_iommu_release_mapping(...)	({ })
+#define arm_iommu_detach_device(...)	({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START	0x20000000
+#define EXYNOS_DEV_ADDR_SIZE	0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+	if (!dev->dma_parms)
+		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+	if (!dev->dma_parms)
+		return -ENOMEM;
+
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+	kfree(dev->dma_parms);
+	dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	int ret;
+
+	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+		DRM_ERROR("Device %s lacks support for IOMMU\n",
+			  dev_name(subdrv_dev));
+		return -EINVAL;
+	}
+
+	ret = configure_dma_max_seg_size(subdrv_dev);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+		if (to_dma_iommu_mapping(subdrv_dev))
+			arm_iommu_detach_device(subdrv_dev);
+
+		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+		ret = iommu_attach_device(priv->mapping, subdrv_dev);
+	}
+
+	if (ret)
+		clear_dma_max_seg_size(subdrv_dev);
+
+	return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+		arm_iommu_detach_device(subdrv_dev);
+	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+		iommu_detach_device(priv->mapping, subdrv_dev);
+
+	clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+	struct exynos_drm_private *priv = drm->dev_private;
+
+	if (!priv->dma_dev) {
+		priv->dma_dev = dev;
+		DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+			 dev_name(dev));
+	}
+
+	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		return 0;
+
+	if (!priv->mapping) {
+		void *mapping;
+
+		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+			mapping = arm_iommu_create_mapping(&platform_bus_type,
+				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+			mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+		if (IS_ERR(mapping))
+			return PTR_ERR(mapping);
+		priv->mapping = mapping;
+	}
+
+	return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+	struct exynos_drm_private *priv = drm->dev_private;
+
+	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+		return;
+
+	arm_iommu_release_mapping(priv->mapping);
+	priv->mapping = NULL;
+	priv->dma_dev = NULL;
+}
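
Worth noting is the pattern that replaces the old #ifdef maze from
exynos_drm_iommu.h: no-op macro stubs keep every call site compilable, and
IS_ENABLED() turns the backend selection into an ordinary conditional that the
compiler removes as dead code. Generically (hypothetical CONFIG_FOO):

	#if !defined(CONFIG_FOO)
	#define foo_attach(...)	({ -ENODEV; })	/* stub, never executed */
	#endif

	if (IS_ENABLED(CONFIG_FOO))
		ret = foo_attach(dev);	/* compiled out when CONFIG_FOO=n */
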
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf..2c75e78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
 
 #define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
 #define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
-#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */
-#define DRM_FIMC_DEVICE		BIT(3)	/* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE		BIT(2)	/* devices shared with V4L2 subsystem */
 
 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
 
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
 static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 	{
 		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
-		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
 		DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
 	return match ?: ERR_PTR(-ENODEV);
 }
 
-static struct device *exynos_drm_get_dma_device(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
-		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
-		struct device *dev;
-
-		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
-			continue;
-
-		while ((dev = bus_find_device(&platform_bus_type, NULL,
-					    &info->driver->driver,
-					    (void *)platform_bus_type.match))) {
-			put_device(dev);
-			return dev;
-		}
-	}
-	return NULL;
-}
-
 static int exynos_drm_bind(struct device *dev)
 {
 	struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
 	dev_set_drvdata(dev, drm);
 	drm->dev_private = (void *)private;
 
-	/* the first real CRTC device is used for all dma mapping operations */
-	private->dma_dev = exynos_drm_get_dma_device();
-	if (!private->dma_dev) {
-		DRM_ERROR("no device found for DMA mapping operations.\n");
-		ret = -ENODEV;
-		goto err_free_private;
-	}
-	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
-		 dev_name(private->dma_dev));
-
-	/* create common IOMMU mapping for all devices attached to Exynos DRM */
-	ret = drm_create_iommu_mapping(drm);
-	if (ret < 0) {
-		DRM_ERROR("failed to create iommu mapping.\n");
-		goto err_free_private;
-	}
-
 	drm_mode_config_init(drm);
 
 	exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ static int exynos_drm_bind(struct device *dev)
 	component_unbind_all(drm->dev, drm);
 err_mode_config_cleanup:
 	drm_mode_config_cleanup(drm);
-	drm_release_iommu_mapping(drm);
-err_free_private:
+	exynos_drm_cleanup_dma(drm);
 	kfree(private);
 err_free_drm:
 	drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
 
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
-	drm_release_iommu_mapping(drm);
+	exynos_drm_cleanup_dma(drm);
 
 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e61e70..71eb240 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -214,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
 	return priv->dma_dev;
 }
 
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+
+	return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
 #ifdef CONFIG_DRM_EXYNOS_DPI
 struct drm_encoder *exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382..31eb538 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
 static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 01d1822..ce9604c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR		4
 #define PREFERRED_BPP		32
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670..90dfea0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-fimc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	ctx->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev);
 }
 
 static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f5693..e3d6a85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 
 /*
  * FIMD stands for Fully Interactive Mobile Display and
@@ -1011,7 +1010,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 	if (is_drm_iommu_supported(drm_dev))
 		fimd_clear_channels(ctx->crtc);
 
-	return drm_iommu_attach_device(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1020,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
 	fimd_disable(ctx->crtc);
 
-	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
 
 	if (ctx->encoder)
 		exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2..24c536d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER		4
 #define G2D_HW_MINOR_VER		1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 	}
 
-	ret = drm_iommu_attach_device(drm_dev, dev);
+	ret = exynos_drm_register_dma(drm_dev, dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
 		g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
 	priv->g2d_dev = NULL;
 
 	cancel_work_sync(&g2d->runqueue_work);
-	drm_iommu_detach_device(g2d->drm_dev, dev);
+	exynos_drm_unregister_dma(g2d->drm_dev, dev);
 }
 
 static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85..df66c38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
 
 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46..f048d97 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-gsc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	ctx->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev);
 }
 
 static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f37370..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
-	if (!dev->dma_parms)
-		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
-	if (!dev->dma_parms)
-		return -ENOMEM;
-
-	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-	return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
-	kfree(dev->dma_parms);
-	dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
-					     EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	__exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-	int ret;
-
-	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
-		DRM_ERROR("Device %s lacks support for IOMMU\n",
-			  dev_name(subdrv_dev));
-		return -EINVAL;
-	}
-
-	ret = configure_dma_max_seg_size(subdrv_dev);
-	if (ret)
-		return ret;
-
-	ret = __exynos_iommu_attach(priv, subdrv_dev);
-	if (ret)
-		clear_dma_max_seg_size(subdrv_dev);
-
-	return 0;
-}
-
-/*
- * drm_iommu_detach_device - detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach themselves from the
- * iommu mapping.
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	__exynos_iommu_detach(priv, subdrv_dev);
-	clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START	0x20000000
-#define EXYNOS_DEV_ADDR_SIZE	0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
-					unsigned long start, unsigned long size)
-{
-	priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
-						 size);
-	return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
-	arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
-					struct device *dev)
-{
-	if (dev->archdata.mapping)
-		arm_iommu_detach_device(dev);
-
-	return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
-					 struct device *dev)
-{
-	arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
-					unsigned long start, unsigned long size)
-{
-	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
-	return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
-	priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
-					struct device *dev)
-{
-	struct iommu_domain *domain = priv->mapping;
-
-	if (dev != priv->dma_dev)
-		return iommu_attach_device(domain, dev);
-	return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
-					 struct device *dev)
-{
-	struct iommu_domain *domain = priv->mapping;
-
-	if (dev != priv->dma_dev)
-		iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
-				struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *dev_dev,
-				struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
-	struct exynos_drm_private *priv = drm_dev->dev_private;
-
-	return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
-	return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
-						struct device *subdrv_dev)
-{
-	return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
-						struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
-	return false;
-}
-
-#endif
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68..8d67b2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &rot->ipp;
 
 	rot->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			   DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &rot->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(rot->drm_dev, rot->dev);
+	exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
 }
 
 static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774..71270ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
 #include "regs-scaler.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 #define scaler_read(offset)		readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_drm_ipp *ipp = &scaler->ipp;
 
 	scaler->drm_dev = drm_dev;
-	drm_iommu_attach_device(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev);
 
 	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
 			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &scaler->ipp;
 
 	exynos_drm_ipp_unregister(drm_dev, ipp);
-	drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+	exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
 }
 
 static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecb..0573eab 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
 
 #define MIXER_WIN_NR		3
 #define VP_DEFAULT_WIN		2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
 	mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
 }
 
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
 {
+	enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
 	u32 val;
 
-	switch (height) {
-	case 480:
-	case 576:
-		val = MXR_CFG_RGB601_0_255;
-		break;
-	case 720:
-	case 1080:
-	default:
-		val = MXR_CFG_RGB709_16_235;
+	if (mode->vdisplay < 720) {
+		val = MXR_CFG_RGB601;
+	} else {
+		val = MXR_CFG_RGB709;
+
 		/* Configure the BT.709 CSC matrix for full range RGB. */
 		mixer_reg_write(ctx, MXR_CM_COEFF_Y,
 			MXR_CSC_CT( 0.184,  0.614,  0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
 			MXR_CSC_CT(-0.102, -0.338,  0.440));
 		mixer_reg_write(ctx, MXR_CM_COEFF_CR,
 			MXR_CSC_CT( 0.440, -0.399, -0.040));
-		break;
 	}
 
+	if (range == HDMI_QUANTIZATION_RANGE_FULL)
+		val |= MXR_CFG_QUANT_RANGE_FULL;
+	else
+		val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
 	mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
 }
 
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
 	struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
 
 	mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
-	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode);
 	mixer_run(ctx);
 }
 
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
 
-	return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+	return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
 }
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
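
drm_default_rgb_quant_range() implements the CEA-861 default that the mixer
now honours: CEA modes (other than 640x480) default to limited range,
everything else to full range. For example:

	/* 1920x1080@60 is a CEA mode -> HDMI_QUANTIZATION_RANGE_LIMITED,
	 * a PC mode such as 1280x1024 -> HDMI_QUANTIZATION_RANGE_FULL */
	enum hdmi_quantization_range range =
		drm_default_rgb_quant_range(mode);
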
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e4..63db697 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
 #define WINCONx_BURSTLEN_16WORD		(0x0 << 10)
 #define WINCONx_BURSTLEN_8WORD		(0x1 << 10)
 #define WINCONx_BURSTLEN_4WORD		(0x2 << 10)
+#define WINCONx_ALPHA_MUL_F		(1 << 7)
 #define WINCONx_BLD_PIX_F		(1 << 6)
 #define WINCONx_BPPMODE_MASK		(0xf << 2)
 #define WINCONx_BPPMODE_16BPP_565	(0x5 << 2)
@@ -116,11 +117,15 @@
 #define WINCONx_BPPMODE_16BPP_A4444	(0xe << 2)
 #define WINCONx_ALPHA_SEL_F		(1 << 1)
 #define WINCONx_ENWIN_F			(1 << 0)
+#define WINCONx_BLEND_MODE_MASK		(0xc2)
 
 /* SHADOWCON */
 #define SHADOWCON_PROTECT_MASK		GENMASK(14, 10)
 #define SHADOWCON_Wx_PROTECT(n)		(1 << (10 + (n)))
 
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK	(0xffffff)
+
 /* VIDOSDxD */
 #define VIDOSD_Wx_ALPHA_R_F(n)		(((n) & 0xff) << 16)
 #define VIDOSD_Wx_ALPHA_G_F(n)		(((n) & 0xff) << 8)
@@ -206,4 +211,21 @@
 #define CRCCTRL_CRCEN			(0x1 << 0)
 #define CRCCTRL_MASK			(0x7)
 
+/* BLENDCON */
+#define BLEND_NEW			(1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO			0x0
+#define BLENDERQ_ONE			0x1
+#define BLENDERQ_ALPHA_A		0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A	0x3
+#define BLENDERQ_ALPHA0			0x6
+#define BLENDERQ_Q_FUNC_F(n)		(n << 18)
+#define BLENDERQ_P_FUNC_F(n)		(n << 12)
+#define BLENDERQ_B_FUNC_F(n)		(n << 6)
+#define BLENDERQ_A_FUNC_F(n)		(n << 0)
+
 #endif /* EXYNOS_REGS_DECON5433_H */
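
Note: the new BLENDERQx fields pack four 6-bit blend-function selects into one
register. Purely as an illustration of the field packing (the mapping of the
A/B/P/Q slots to source and destination factors is hardware-defined and not
spelled out by this patch), a classic source-over equation
Cr = Ca*Aa + Cb*(1 - Aa) could pack as:

    /* Hypothetical packing sketch, not part of this patch. */
    u32 blendq = BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A) |
                 BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A) |
                 BLENDERQ_P_FUNC_F(BLENDERQ_ALPHA_A) |
                 BLENDERQ_Q_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
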
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194..5ff095b 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
 /* bits for MXR_CFG */
 #define MXR_CFG_LAYER_UPDATE		(1 << 31)
 #define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255		(0 << 9)
-#define MXR_CFG_RGB601_16_235		(1 << 9)
-#define MXR_CFG_RGB709_0_255		(2 << 9)
-#define MXR_CFG_RGB709_16_235		(3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL	(0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED	(1 << 9)
+#define MXR_CFG_RGB601			(0 << 10)
+#define MXR_CFG_RGB709			(1 << 10)
+
 #define MXR_CFG_RGB_FMT_MASK		0x600
 #define MXR_CFG_OUT_YUV444		(0 << 8)
 #define MXR_CFG_OUT_RGB888		(1 << 8)
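
Note: the new encoding is bit-compatible with the defines it replaces; bits
10:9 are merely renamed into an independent colorimetry bit and range bit. For
example, the old MXR_CFG_RGB709_0_255 (2 << 9 = 0x400) is exactly the new
MXR_CFG_RGB709 | MXR_CFG_QUANT_RANGE_FULL ((1 << 10) | (0 << 9) = 0x400), and
MXR_CFG_RGB_FMT_MASK (0x600) still covers both bits.
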
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 0e37524..18afc94 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -17,6 +17,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <video/videomode.h>
 
 #include "fsl_dcu_drm_crtc.h"
 #include "fsl_dcu_drm_drv.h"
@@ -85,40 +86,34 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	struct drm_connector *con = &fsl_dev->connector.base;
 	struct drm_display_mode *mode = &crtc->state->mode;
-	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, index, pol = 0;
+	unsigned int pol = 0;
+	struct videomode vm;
 
-	index = drm_crtc_index(crtc);
 	clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);
 
-	/* Configure timings: */
-	hbp = mode->htotal - mode->hsync_end;
-	hfp = mode->hsync_start - mode->hdisplay;
-	hsw = mode->hsync_end - mode->hsync_start;
-	vbp = mode->vtotal - mode->vsync_end;
-	vfp = mode->vsync_start - mode->vdisplay;
-	vsw = mode->vsync_end - mode->vsync_start;
+	drm_display_mode_to_videomode(mode, &vm);
 
 	/* INV_PXCK as default (most display sample data on rising edge) */
 	if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
 		pol |= DCU_SYN_POL_INV_PXCK;
 
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
 		pol |= DCU_SYN_POL_INV_HS_LOW;
 
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
 		pol |= DCU_SYN_POL_INV_VS_LOW;
 
 	regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
-		     DCU_HSYN_PARA_BP(hbp) |
-		     DCU_HSYN_PARA_PW(hsw) |
-		     DCU_HSYN_PARA_FP(hfp));
+		     DCU_HSYN_PARA_BP(vm.hback_porch) |
+		     DCU_HSYN_PARA_PW(vm.hsync_len) |
+		     DCU_HSYN_PARA_FP(vm.hfront_porch));
 	regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
-		     DCU_VSYN_PARA_BP(vbp) |
-		     DCU_VSYN_PARA_PW(vsw) |
-		     DCU_VSYN_PARA_FP(vfp));
+		     DCU_VSYN_PARA_BP(vm.vback_porch) |
+		     DCU_VSYN_PARA_PW(vm.vsync_len) |
+		     DCU_VSYN_PARA_FP(vm.vfront_porch));
 	regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
-		     DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
-		     DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+		     DCU_DISP_SIZE_DELTA_Y(vm.vactive) |
+		     DCU_DISP_SIZE_DELTA_X(vm.hactive));
 	regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
 	regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
 		     DCU_BGND_G(0) | DCU_BGND_B(0));
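
Note: drm_display_mode_to_videomode() computes exactly the porch, sync and
active values the removed open-coded block derived by hand. A sketch of the
equivalence (example_* is illustrative; the real helper lives in drm_modes.c):

    /* Sketch: what drm_display_mode_to_videomode() fills in, expressed
     * with the drm_display_mode fields the old code read directly.
     */
    static void example_mode_to_vm(const struct drm_display_mode *mode,
                                   struct videomode *vm)
    {
            vm->pixelclock   = mode->clock * 1000;

            vm->hactive      = mode->hdisplay;
            vm->hfront_porch = mode->hsync_start - mode->hdisplay;
            vm->hsync_len    = mode->hsync_end - mode->hsync_start;
            vm->hback_porch  = mode->htotal - mode->hsync_end;

            vm->vactive      = mode->vdisplay;
            vm->vfront_porch = mode->vsync_start - mode->vdisplay;
            vm->vsync_len    = mode->vsync_end - mode->vsync_start;
            vm->vback_porch  = mode->vtotal - mode->vsync_end;

            /* The real helper also translates mode->flags into the
             * DISPLAY_FLAGS_*SYNC_LOW/HIGH bits checked above. */
    }
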
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0496be5..ceddc3e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -26,6 +26,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_modeset_helper.h>
 
@@ -89,20 +90,11 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
 			"Invalid legacyfb_depth.  Defaulting to 24bpp\n");
 		legacyfb_depth = 24;
 	}
-	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
-	if (IS_ERR(fsl_dev->fbdev)) {
-		ret = PTR_ERR(fsl_dev->fbdev);
-		fsl_dev->fbdev = NULL;
-		goto done;
-	}
 
 	return 0;
 done:
 	drm_kms_helper_poll_fini(dev);
 
-	if (fsl_dev->fbdev)
-		drm_fbdev_cma_fini(fsl_dev->fbdev);
-
 	drm_mode_config_cleanup(dev);
 	drm_irq_uninstall(dev);
 	dev->dev_private = NULL;
@@ -112,14 +104,9 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
 
 static void fsl_dcu_unload(struct drm_device *dev)
 {
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
 	drm_atomic_helper_shutdown(dev);
 	drm_kms_helper_poll_fini(dev);
 
-	if (fsl_dev->fbdev)
-		drm_fbdev_cma_fini(fsl_dev->fbdev);
-
 	drm_mode_config_cleanup(dev);
 	drm_irq_uninstall(dev);
 
@@ -147,19 +134,11 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static void fsl_dcu_drm_lastclose(struct drm_device *dev)
-{
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	drm_fbdev_cma_restore_mode(fsl_dev->fbdev);
-}
-
 DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
 
 static struct drm_driver fsl_dcu_drm_driver = {
 	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
 				| DRIVER_PRIME | DRIVER_ATOMIC,
-	.lastclose		= fsl_dcu_drm_lastclose,
 	.load			= fsl_dcu_load,
 	.unload			= fsl_dcu_unload,
 	.irq_handler		= fsl_dcu_drm_irq,
@@ -355,6 +334,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto put;
 
+	drm_fbdev_generic_setup(drm, legacyfb_depth);
+
 	return 0;
 
 put:
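
Note: with the cma-fbdev lifecycle gone (init, fini and the restore_mode
lastclose hook), the generic emulation manages itself through DRM client
callbacks, so the driver only makes one call after registration. A sketch of
the pattern, where a preferred_bpp of 0 would let the helper choose a default:

    /* Sketch: generic fbdev bring-up after drm_dev_register(). */
    ret = drm_dev_register(drm, 0);
    if (ret < 0)
            goto put;

    drm_fbdev_generic_setup(drm, legacyfb_depth);   /* 0 == default bpp */
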
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 93bfb98..cb87bb7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -191,7 +191,6 @@ struct fsl_dcu_drm_device {
 	/*protects hardware register*/
 	spinlock_t irq_lock;
 	struct drm_device *drm;
-	struct drm_fbdev_cma *fbdev;
 	struct drm_crtc crtc;
 	struct drm_encoder encoder;
 	struct fsl_dcu_drm_connector connector;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 45c25a4..3c168ae 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -49,8 +49,6 @@ struct hibmc_drm_private {
 	bool mode_config_initialized;
 
 	/* ttm */
-	struct drm_global_reference mem_global_ref;
-	struct ttm_bo_global_ref bo_global_ref;
 	struct ttm_bo_device bdev;
 	bool initialized;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 2e3e0bd..dd38326 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct hibmc_drm_private, bdev);
 }
 
-static int
-hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
-{
-	int ret;
-
-	hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
-	hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
-	hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
-	hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
-	ret = drm_global_item_ref(&hibmc->mem_global_ref);
-	if (ret) {
-		DRM_ERROR("could not get ref on ttm global: %d\n", ret);
-		return ret;
-	}
-
-	hibmc->bo_global_ref.mem_glob =
-		hibmc->mem_global_ref.object;
-	hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
-	hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
-	hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
-	hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
-	ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
-	if (ret) {
-		DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
-		drm_global_item_unref(&hibmc->mem_global_ref);
-		return ret;
-	}
-	return 0;
-}
-
-static void
-hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
-{
-	drm_global_item_unref(&hibmc->bo_global_ref.ref);
-	drm_global_item_unref(&hibmc->mem_global_ref);
-	hibmc->mem_global_ref.release = NULL;
-}
-
 static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
 	struct drm_device *dev = hibmc->dev;
 	struct ttm_bo_device *bdev = &hibmc->bdev;
 
-	ret = hibmc_ttm_global_init(hibmc);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&hibmc->bdev,
-				 hibmc->bo_global_ref.ref.object,
 				 &hibmc_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
 				 true);
 	if (ret) {
-		hibmc_ttm_global_release(hibmc);
 		DRM_ERROR("error initializing bo driver: %d\n", ret);
 		return ret;
 	}
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
 	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
 			     hibmc->fb_size >> PAGE_SHIFT);
 	if (ret) {
-		hibmc_ttm_global_release(hibmc);
 		DRM_ERROR("failed ttm VRAM init: %d\n", ret);
 		return ret;
 	}
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
 		return;
 
 	ttm_bo_device_release(&hibmc->bdev);
-	hibmc_ttm_global_release(hibmc);
 	hibmc->mm_inited = false;
 }
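
Note: since TTM now initializes its global state internally, the driver-side
bring-up loses both the drm_global_reference plumbing and the ttm_bo_global
argument. The minimal init reduces to this (a sketch against the post-series
five-argument signature, reusing the driver's own names):

    /* Sketch: TTM device init once the global refs are gone. */
    ret = ttm_bo_device_init(&hibmc->bdev,
                             &hibmc_bo_driver,
                             dev->anon_inode->i_mapping,
                             DRM_FILE_PAGE_OFFSET,
                             true);             /* need_dma32 */
    if (ret)
            return ret;     /* no global state left to unwind */
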
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f1..19b5fe5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@
 	  i915_gemfs.o \
 	  i915_query.o \
 	  i915_request.o \
+	  i915_scheduler.o \
 	  i915_timeline.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
@@ -112,6 +113,8 @@
 	  intel_bios.o \
 	  intel_cdclk.o \
 	  intel_color.o \
+	  intel_combo_phy.o \
+	  intel_connector.o \
 	  intel_display.o \
 	  intel_dpio_phy.o \
 	  intel_dpll_mgr.o \
@@ -120,9 +123,9 @@
 	  intel_frontbuffer.o \
 	  intel_hdcp.o \
 	  intel_hotplug.o \
-	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_psr.o \
+	  intel_quirks.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@
 	  intel_dp_link_training.o \
 	  intel_dp_mst.o \
 	  intel_dp.o \
+	  intel_dsi.o \
 	  intel_dsi_dcs_backlight.o \
 	  intel_dsi_vbt.o \
 	  intel_dvo.o \
@@ -153,14 +157,17 @@
 	  intel_sdvo.o \
 	  intel_tv.o \
 	  vlv_dsi.o \
-	  vlv_dsi_pll.o
+	  vlv_dsi_pll.o \
+	  intel_vdsc.o
 
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
 	selftests/i915_random.o \
 	selftests/i915_selftest.o \
-	selftests/igt_flush_test.o
+	selftests/igt_flush_test.o \
+	selftests/igt_reset.o \
+	selftests/igt_spinner.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003..b8fbe3fa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+					 struct i915_gem_context *ctx)
+{
+	struct intel_vgpu_mm *mm = workload->shadow_mm;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	int i = 0;
+
+	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+		return -1;
+
+	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+	} else {
+		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+			px_dma(ppgtt->pdp.page_directory[i]) =
+				mm->ppgtt_mm.shadow_pdps[i];
+		}
+	}
+
+	return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadow it as well, including ringbuffer, wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->req)
 		return 0;
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		return ret;
+	}
+
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a..38dcee1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 			      size_t count, loff_t *pos)
 {
-	struct i915_gpu_state *error = file->private_data;
-	struct drm_i915_error_state_buf str;
+	struct i915_gpu_state *error;
 	ssize_t ret;
-	loff_t tmp;
+	void *buf;
 
+	error = file->private_data;
 	if (!error)
 		return 0;
 
-	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
-	if (ret)
-		return ret;
+	/* Bounce buffer required because of kernfs __user API convenience. */
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	ret = i915_error_state_to_str(&str, error);
-	if (ret)
+	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+	if (ret <= 0)
 		goto out;
 
-	tmp = 0;
-	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
-	if (ret < 0)
-		goto out;
+	if (!copy_to_user(ubuf, buf, ret))
+		*pos += ret;
+	else
+		ret = -EFAULT;
 
-	*pos = str.start + ret;
 out:
-	i915_error_state_buf_release(&str);
+	kfree(buf);
 	return ret;
 }
 
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	if (!IS_GEN5(dev_priv))
 		return -ENODEV;
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	seq_printf(m, "GFX power: %ld\n", gfx);
 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	u32 act_freq = rps->cur_freq;
 	struct drm_file *file;
 
+	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			mutex_lock(&dev_priv->pcu_lock);
+			act_freq = vlv_punit_read(dev_priv,
+						  PUNIT_REG_GPU_FREQ_STS);
+			act_freq = (act_freq >> 8) & 0xff;
+			mutex_unlock(&dev_priv->pcu_lock);
+		} else {
+			act_freq = intel_get_cagf(dev_priv,
+						  I915_READ(GEN6_RPSTAT1));
+		}
+		intel_runtime_pm_put(dev_priv);
+	}
+
 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
-	seq_printf(m, "Frequency requested %d\n",
-		   intel_gpu_freq(dev_priv, rps->cur_freq));
+	seq_printf(m, "Frequency requested %d, actual %d\n",
+		   intel_gpu_freq(dev_priv, rps->cur_freq),
+		   intel_gpu_freq(dev_priv, act_freq));
 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
 		   intel_gpu_freq(dev_priv, rps->min_freq),
 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
 		   CSR_VERSION_MINOR(csr->version));
 
-	if (IS_KABYLAKE(dev_priv) ||
-	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
-		seq_printf(m, "DC3 -> DC5 count: %d\n",
-			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
+	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+		goto out;
+
+	seq_printf(m, "DC3 -> DC5 count: %d\n",
+		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+						    SKL_CSR_DC3_DC5_COUNT));
+	if (!IS_GEN9_LP(dev_priv))
 		seq_printf(m, "DC5 -> DC6 count: %d\n",
 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
-	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
-		seq_printf(m, "DC3 -> DC5 count: %d\n",
-			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
-	}
 
 out:
 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
 	seq_printf(m, "connector %d: type %s, status: %s\n",
 		   connector->base.id, connector->name,
 		   drm_get_connector_status_name(connector->status));
-	if (connector->status == connector_status_connected) {
-		seq_printf(m, "\tname: %s\n", connector->display_info.name);
-		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
-			   connector->display_info.width_mm,
-			   connector->display_info.height_mm);
-		seq_printf(m, "\tsubpixel order: %s\n",
-			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
-		seq_printf(m, "\tCEA rev: %d\n",
-			   connector->display_info.cea_rev);
-	}
+
+	if (connector->status == connector_status_disconnected)
+		return;
+
+	seq_printf(m, "\tname: %s\n", connector->display_info.name);
+	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+		   connector->display_info.width_mm,
+		   connector->display_info.height_mm);
+	seq_printf(m, "\tsubpixel order: %s\n",
+		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 
 	if (!intel_encoder)
 		return;
@@ -3355,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
-	int i;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+	struct i915_wa *wa;
+	unsigned int i;
 
-	seq_printf(m, "Workarounds applied: %d\n", wa->count);
-	for (i = 0; i < wa->count; ++i)
+	seq_printf(m, "Workarounds applied: %u\n", wal->count);
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
 
 	return 0;
 }
@@ -3421,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *ddb;
 	struct skl_ddb_entry *entry;
-	enum pipe pipe;
-	int plane;
+	struct intel_crtc *crtc;
 
 	if (INTEL_GEN(dev_priv) < 9)
 		return -ENODEV;
 
 	drm_modeset_lock_all(dev);
 
-	ddb = &dev_priv->wm.skl_hw.ddb;
-
 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
 
-	for_each_pipe(dev_priv, pipe) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
+
 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
 
-		for_each_universal_plane(dev_priv, pipe, plane) {
-			entry = &ddb->plane[pipe][plane];
-			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
 				   entry->start, entry->end,
 				   skl_ddb_entry_size(entry));
 		}
 
-		entry = &ddb->plane[pipe][PLANE_CURSOR];
+		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
 			   entry->end, skl_ddb_entry_size(entry));
 	}
@@ -4172,6 +4195,7 @@ i915_drop_caches_set(void *data, u64 val)
 
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
+	intel_runtime_pm_get(i915);
 
 	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
 		i915_gem_set_wedged(i915);
@@ -4181,7 +4205,7 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 		if (ret)
-			return ret;
+			goto out;
 
 		if (val & DROP_ACTIVE)
 			ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4213,8 @@ i915_drop_caches_set(void *data, u64 val)
 						     I915_WAIT_LOCKED,
 						     MAX_SCHEDULE_TIMEOUT);
 
-		if (ret == 0 && val & DROP_RESET_SEQNO) {
-			intel_runtime_pm_get(i915);
+		if (ret == 0 && val & DROP_RESET_SEQNO)
 			ret = i915_gem_set_global_seqno(&i915->drm, 1);
-			intel_runtime_pm_put(i915);
-		}
 
 		if (val & DROP_RETIRE)
 			i915_retire_requests(i915);
@@ -4231,6 +4252,9 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & DROP_FREED)
 		i915_gem_drain_freed_objects(i915);
 
+out:
+	intel_runtime_pm_put(i915);
+
 	return ret;
 }
 
@@ -4331,7 +4355,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
 	for (s = 0; s < info->sseu.max_slices; s++) {
 		/*
 		 * FIXME: Valid SS Mask respects the spec and read
-		 * only valid bits for those registers, excluding reserverd
+		 * only valid bits for those registers, excluding reserved
 		 * although this seems wrong because it would leave many
 		 * subslices without ACK.
 		 */
@@ -4571,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = m->private;
 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
 
+	/* Synchronize with everything first in case there's been an HPD
+	 * storm, but we haven't finished handling it in the kernel yet
+	 */
+	synchronize_irq(dev_priv->drm.irq);
+	flush_work(&dev_priv->hotplug.dig_port_work);
+	flush_work(&dev_priv->hotplug.hotplug_work);
+
 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
 	seq_printf(m, "Detected: %s\n",
 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
@@ -4641,24 +4672,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
 	.write = i915_hpd_storm_ctl_write
 };
 
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+
+	seq_printf(m, "Enabled: %s\n",
+		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+	return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_hpd_short_storm_ctl_show,
+			   inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+					      const char __user *ubuf,
+					      size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	char *newline;
+	char tmp[16];
+	int i;
+	bool new_state;
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	/* Strip newline, if any */
+	newline = strchr(tmp, '\n');
+	if (newline)
+		*newline = '\0';
+
+	/* Reset to the "default" state for this system */
+	if (strcmp(tmp, "reset") == 0)
+		new_state = !HAS_DP_MST(dev_priv);
+	else if (kstrtobool(tmp, &new_state) != 0)
+		return -EINVAL;
+
+	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+		      new_state ? "En" : "Dis");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	hotplug->hpd_short_storm_enabled = new_state;
+	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
+	for_each_hpd_pin(i)
+		hotplug->stats[i].count = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Re-enable hpd immediately if we were in an irq storm */
+	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+	return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_hpd_short_storm_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_hpd_short_storm_ctl_write,
+};
+
 static int i915_drrs_ctl_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *intel_crtc;
-	struct intel_encoder *encoder;
-	struct intel_dp *intel_dp;
+	struct intel_crtc *crtc;
 
 	if (INTEL_GEN(dev_priv) < 7)
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
-	for_each_intel_crtc(dev, intel_crtc) {
-		if (!intel_crtc->base.state->active ||
-					!intel_crtc->config->has_drrs)
-			continue;
+	for_each_intel_crtc(dev, crtc) {
+		struct drm_connector_list_iter conn_iter;
+		struct intel_crtc_state *crtc_state;
+		struct drm_connector *connector;
+		struct drm_crtc_commit *commit;
+		int ret;
 
-		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+		if (ret)
+			return ret;
+
+		crtc_state = to_intel_crtc_state(crtc->base.state);
+
+		if (!crtc_state->base.active ||
+		    !crtc_state->has_drrs)
+			goto out;
+
+		commit = crtc_state->base.commit;
+		if (commit) {
+			ret = wait_for_completion_interruptible(&commit->hw_done);
+			if (ret)
+				goto out;
+		}
+
+		drm_connector_list_iter_begin(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter) {
+			struct intel_encoder *encoder;
+			struct intel_dp *intel_dp;
+
+			if (!(crtc_state->base.connector_mask &
+			      drm_connector_mask(connector)))
+				continue;
+
+			encoder = intel_attached_encoder(connector);
 			if (encoder->type != INTEL_OUTPUT_EDP)
 				continue;
 
@@ -4668,13 +4797,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
 			intel_dp = enc_to_intel_dp(&encoder->base);
 			if (val)
 				intel_edp_drrs_enable(intel_dp,
-							intel_crtc->config);
+						      crtc_state);
 			else
 				intel_edp_drrs_disable(intel_dp,
-							intel_crtc->config);
+						       crtc_state);
 		}
+		drm_connector_list_iter_end(&conn_iter);
+
+out:
+		drm_modeset_unlock(&crtc->base.mutex);
+		if (ret)
+			return ret;
 	}
-	drm_modeset_unlock_all(dev);
 
 	return 0;
 }
@@ -4818,6 +4952,7 @@ static const struct i915_debugfs_files {
 	{"i915_guc_log_level", &i915_guc_log_level_fops},
 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
 	{"i915_ipc_status", &i915_ipc_status_fops},
 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5034,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
 			continue;
 
 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-		if (err <= 0) {
-			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
-				  size, b->offset, err);
-			continue;
-		}
-
-		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+		if (err < 0)
+			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+		else
+			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
 	}
 
 	return 0;
@@ -4934,6 +5066,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_panel);
 
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	/* HDCP is supported only when the connector provides a shim */
+	if (!intel_connector->hdcp.shim)
+		return -EINVAL;
+
+	seq_printf(m, "%s:%d HDCP version: ", connector->name,
+		   connector->base.id);
+	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+		   "None" : "HDCP1.4");
+	seq_puts(m, "\n");
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -4963,5 +5117,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
 				    connector, &i915_psr_sink_status_fops);
 	}
 
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+				    connector, &i915_hdcp_sink_capability_fops);
+	}
+
 	return 0;
 }
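
Note: gpu_state_read() above moves from a pre-rendered error-state string to
an iterator (i915_gpu_state_copy_to_buffer) plus a kmalloc bounce buffer,
since debugfs hands the driver a __user pointer. The general pattern, sketched
outside the i915 specifics (produce_bytes() stands in for the iterator and is
assumed to return the byte count written, 0 at EOF, or -errno):

    static ssize_t example_read(struct file *file, char __user *ubuf,
                                size_t count, loff_t *pos)
    {
            ssize_t ret;
            void *buf;

            buf = kmalloc(count, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            ret = produce_bytes(file->private_data, buf, *pos, count);
            if (ret <= 0)
                    goto out;

            /* copy_to_user() returns the number of bytes NOT copied. */
            if (copy_to_user(ubuf, buf, ret))
                    ret = -EFAULT;
            else
                    *pos += ret;
    out:
            kfree(buf);
            return ret;
    }
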
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 47062ee..b310a89 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
+#include "intel_workarounds.h"
 
 static struct drm_driver driver;
 
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
 	 * display.
 	 */
-	if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+	if (pch && !HAS_DISPLAY(dev_priv)) {
 		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
 		dev_priv->pch_type = PCH_NOP;
 		dev_priv->pch_id = 0;
@@ -345,7 +346,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = HAS_WT(dev_priv);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = USES_PPGTT(dev_priv);
+		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
 		value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +646,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
+	if (HAS_DISPLAY(dev_priv)) {
+		ret = drm_vblank_init(&dev_priv->drm,
+				      INTEL_INFO(dev_priv)->num_pipes);
+		if (ret)
+			goto out;
+	}
+
 	intel_bios_init(dev_priv);
 
 	/* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,9 +695,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_modeset;
 
-	intel_setup_overlay(dev_priv);
+	intel_overlay_setup(dev_priv);
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return 0;
 
 	ret = intel_fbdev_init(dev);
@@ -699,6 +707,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	intel_hpd_init(dev_priv);
 
+	intel_init_ipc(dev_priv);
+
 	return 0;
 
 cleanup_gem:
@@ -859,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 	pre |= IS_HSW_EARLY_SDV(dev_priv);
 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
 
 	if (pre) {
 		DRM_ERROR("This is a pre-production stepping. "
@@ -1030,6 +1041,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 err_uncore:
 	intel_uncore_fini(dev_priv);
+	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 
@@ -1049,17 +1061,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-	/*
-	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
-	 * user's requested state against the hardware/driver capabilities.  We
-	 * do this now so that we can print out any log messages once rather
-	 * than every time we check intel_enable_ppgtt().
-	 */
-	i915_modparams.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev_priv,
-					    i915_modparams.enable_ppgtt);
-	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
 	intel_gvt_sanitize_options(dev_priv);
 }
 
@@ -1340,7 +1341,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	/* Need to calculate bandwidth only for Gen9 */
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_get_dram_info(dev_priv);
-	else if (INTEL_GEN(dev_priv) == 9)
+	else if (IS_GEN9(dev_priv))
 		ret = skl_get_dram_info(dev_priv);
 	else
 		ret = skl_dram_get_channels_info(dev_priv);
@@ -1375,6 +1376,29 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
 
+	if (HAS_PPGTT(dev_priv)) {
+		if (intel_vgpu_active(dev_priv) &&
+		    !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+			i915_report_error(dev_priv,
+					  "incompatible vGPU found, support for isolated ppGTT required\n");
+			return -ENXIO;
+		}
+	}
+
+	if (HAS_EXECLISTS(dev_priv)) {
+		/*
+		 * Older GVT emulation depends upon intercepting CSB mmio,
+		 * which we no longer use, preferring to use the HWSP cache
+		 * instead.
+		 */
+		if (intel_vgpu_active(dev_priv) &&
+		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+			i915_report_error(dev_priv,
+					  "old vGPU host found, support for HWSP emulation required\n");
+			return -ENXIO;
+		}
+	}
+
 	intel_sanitize_options(dev_priv);
 
 	i915_perf_init(dev_priv);
@@ -1544,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
 
-	if (INTEL_INFO(dev_priv)->num_pipes) {
+	if (HAS_DISPLAY(dev_priv)) {
 		/* Must be done after probing outputs */
 		intel_opregion_register(dev_priv);
 		acpi_video_register();
@@ -1568,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	 * We need to coordinate the hotplugs with the asynchronous fbdev
 	 * configuration, for which we use the fbdev->async_cookie.
 	 */
-	if (INTEL_INFO(dev_priv)->num_pipes)
+	if (HAS_DISPLAY(dev_priv))
 		drm_kms_helper_poll_init(dev);
 
 	intel_power_domains_enable(dev_priv);
@@ -1631,14 +1655,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 		(struct intel_device_info *)ent->driver_data;
 	struct intel_device_info *device_info;
 	struct drm_i915_private *i915;
+	int err;
 
 	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
 	if (!i915)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+	if (err) {
 		kfree(i915);
-		return NULL;
+		return ERR_PTR(err);
 	}
 
 	i915->drm.pdev = pdev;
@@ -1651,8 +1677,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 	device_info->device_id = pdev->device;
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+		     BITS_PER_TYPE(device_info->platform_mask));
+	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
 
 	return i915;
 }
@@ -1687,8 +1713,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int ret;
 
 	dev_priv = i915_driver_create(pdev, ent);
-	if (!dev_priv)
-		return -ENOMEM;
+	if (IS_ERR(dev_priv))
+		return PTR_ERR(dev_priv);
 
 	/* Disable nuclear pageflip by default on pre-ILK */
 	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1712,26 +1738,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret < 0)
 		goto out_cleanup_mmio;
 
-	/*
-	 * TODO: move the vblank init and parts of modeset init steps into one
-	 * of the i915_driver_init_/i915_driver_register functions according
-	 * to the role/effect of the given init step.
-	 */
-	if (INTEL_INFO(dev_priv)->num_pipes) {
-		ret = drm_vblank_init(&dev_priv->drm,
-				      INTEL_INFO(dev_priv)->num_pipes);
-		if (ret)
-			goto out_cleanup_hw;
-	}
-
 	ret = i915_load_modeset_init(&dev_priv->drm);
 	if (ret < 0)
 		goto out_cleanup_hw;
 
 	i915_driver_register(dev_priv);
 
-	intel_init_ipc(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	i915_welcome_messages(dev_priv);
@@ -1783,7 +1795,6 @@ void i915_driver_unload(struct drm_device *dev)
 	i915_reset_error_state(dev_priv);
 
 	i915_gem_fini(dev_priv);
-	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini_hw(dev_priv);
 
@@ -1921,9 +1932,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	i915_save_state(dev_priv);
 
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-	intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
-	intel_opregion_unregister(dev_priv);
+	intel_opregion_suspend(dev_priv, opregion_target_state);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -1964,7 +1973,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 				    get_suspend_mode(dev_priv, hibernation));
 
 	ret = 0;
-	if (IS_GEN9_LP(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
 		bxt_enable_dc9(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_enable_pc8(dev_priv);
@@ -2042,7 +2051,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	i915_restore_state(dev_priv);
 	intel_pps_unlock_regs_wa(dev_priv);
-	intel_opregion_setup(dev_priv);
 
 	intel_init_pch_refclk(dev_priv);
 
@@ -2084,12 +2092,10 @@ static int i915_drm_resume(struct drm_device *dev)
 	 * */
 	intel_hpd_init(dev_priv);
 
-	intel_opregion_register(dev_priv);
+	intel_opregion_resume(dev_priv);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
-	intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
 	intel_power_domains_enable(dev_priv);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -2157,7 +2163,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_resume_early(dev_priv);
 
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2924,7 +2930,10 @@ static int intel_runtime_suspend(struct device *kdev)
 	intel_uncore_suspend(dev_priv);
 
 	ret = 0;
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		icl_display_core_uninit(dev_priv);
+		bxt_enable_dc9(dev_priv);
+	} else if (IS_GEN9_LP(dev_priv)) {
 		bxt_display_core_uninit(dev_priv);
 		bxt_enable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,7 +3018,18 @@ static int intel_runtime_resume(struct device *kdev)
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		bxt_disable_dc9(dev_priv);
+		icl_display_core_init(dev_priv, true);
+		if (dev_priv->csr.dmc_payload) {
+			if (dev_priv->csr.allowed_dc_mask &
+			    DC_STATE_EN_UPTO_DC6)
+				skl_enable_dc6(dev_priv);
+			else if (dev_priv->csr.allowed_dc_mask &
+				 DC_STATE_EN_UPTO_DC5)
+				gen9_enable_dc5(dev_priv);
+		}
+	} else if (IS_GEN9_LP(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
 		if (dev_priv->csr.dmc_payload &&
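
Note: i915_driver_create() switching from NULL to ERR_PTR() lets the probe
path report the real errno from drm_dev_init(). The convention in miniature
(foo_* is illustrative; ERR_PTR/IS_ERR/PTR_ERR come from <linux/err.h>):

    /* Sketch of the ERR_PTR convention adopted above. */
    static struct foo *foo_create(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
            int err;

            if (!f)
                    return ERR_PTR(-ENOMEM);

            err = foo_init(f);      /* stand-in for drm_dev_init() */
            if (err) {
                    kfree(f);
                    return ERR_PTR(err); /* errno encoded in pointer */
            }
            return f;
    }

    /* Callers test with IS_ERR(p) and recover the code via PTR_ERR(p),
     * replacing the old "!p ? -ENOMEM" check. */
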
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 872a2e1..b1c3196 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,9 @@
 #include <drm/drm_auth.h>
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
 
+#include "i915_fixed.h"
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -88,8 +90,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180921"
-#define DRIVER_TIMESTAMP	1537521997
+#define DRIVER_DATE		"20181204"
+#define DRIVER_TIMESTAMP	1543944377
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -128,144 +130,6 @@ bool i915_error_injected(void);
 	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)
 
-typedef struct {
-	uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
-	uint_fixed_16_16_t fp; \
-	fp.val = UINT_MAX; \
-	fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
-	if (val.val == 0)
-		return true;
-	return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
-	uint_fixed_16_16_t fp;
-
-	WARN_ON(val > U16_MAX);
-
-	fp.val = val << 16;
-	return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
-	return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
-	return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
-						 uint_fixed_16_16_t min2)
-{
-	uint_fixed_16_16_t min;
-
-	min.val = min(min1.val, min2.val);
-	return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
-						 uint_fixed_16_16_t max2)
-{
-	uint_fixed_16_16_t max;
-
-	max.val = max(max1.val, max2.val);
-	return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
-	uint_fixed_16_16_t fp;
-	WARN_ON(val > U32_MAX);
-	fp.val = (uint32_t) val;
-	return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-					    uint_fixed_16_16_t d)
-{
-	return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val * mul.val;
-	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-	WARN_ON(intermediate_val > U32_MAX);
-	return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
-					     uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val.val * mul.val;
-	intermediate_val = intermediate_val >> 16;
-	return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
-	uint64_t interm_val;
-
-	interm_val = (uint64_t)val << 16;
-	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-	return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t d)
-{
-	uint64_t interm_val;
-
-	interm_val = (uint64_t)val << 16;
-	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-	WARN_ON(interm_val > U32_MAX);
-	return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-						     uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val * mul.val;
-	return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
-					     uint_fixed_16_16_t add2)
-{
-	uint64_t interm_sum;
-
-	interm_sum = (uint64_t) add1.val + add2.val;
-	return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-						 uint32_t add2)
-{
-	uint64_t interm_sum;
-	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
-	interm_sum = (uint64_t) add1.val + interm_add2.val;
-	return clamp_u64_to_fixed16(interm_sum);
-}
-
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
@@ -284,7 +148,8 @@ enum hpd_pin {
 #define for_each_hpd_pin(__pin) \
 	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
 
 struct i915_hotplug {
 	struct work_struct hotplug_work;
@@ -309,6 +174,8 @@ struct i915_hotplug {
 	bool poll_enabled;
 
 	unsigned int hpd_storm_threshold;
+	/* Whether or not to count short HPD IRQs in HPD storms */
+	u8 hpd_short_storm_enabled;
 
 	/*
 	 * if we get a HPD irq from DP and a HPD irq from non-DP
@@ -466,8 +333,10 @@ struct drm_i915_display_funcs {
 struct intel_csr {
 	struct work_struct work;
 	const char *fw_path;
+	uint32_t required_version;
+	uint32_t max_fw_size; /* bytes */
 	uint32_t *dmc_payload;
-	uint32_t dmc_fw_size;
+	uint32_t dmc_fw_size; /* dwords */
 	uint32_t version;
 	uint32_t mmio_count;
 	i915_reg_t mmioaddr[8];
@@ -547,6 +416,8 @@ struct intel_fbc {
 			int adjusted_y;
 
 			int y;
+
+			uint16_t pixel_blend_mode;
 		} plane;
 
 		struct {
@@ -625,17 +496,19 @@ struct i915_psr {
 	bool sink_support;
 	bool prepared, enabled;
 	struct intel_dp *dp;
+	enum pipe pipe;
 	bool active;
 	struct work_struct work;
 	unsigned busy_frontbuffer_bits;
 	bool sink_psr2_support;
 	bool link_standby;
 	bool colorimetry_support;
-	bool alpm;
 	bool psr2_enabled;
 	u8 sink_sync_latency;
 	ktime_t last_entry_attempt;
 	ktime_t last_exit;
+	bool sink_not_reliable;
+	bool irq_aux_error;
 };
 
 enum intel_pch {
@@ -919,6 +792,11 @@ struct i915_power_well_desc {
 			/* The pw is backing the VGA functionality */
 			bool has_vga:1;
 			bool has_fuses:1;
+			/*
+			 * The pw is for an ICL+ TypeC PHY port in
+			 * Thunderbolt mode.
+			 */
+			bool is_tc_tbt:1;
 		} hsw;
 	};
 	const struct i915_power_well_ops *ops;
@@ -1043,17 +921,6 @@ struct i915_gem_mm {
 
 #define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
 
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B  0x05
-#define DDC_PIN_C  0x04
-#define DDC_PIN_D  0x06
-
 struct ddi_vbt_port_info {
 	int max_tmds_clock;
 
@@ -1100,6 +967,7 @@ struct intel_vbt_data {
 	unsigned int panel_type:4;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	enum drm_panel_orientation orientation;
 
 	enum drrs_support_type drrs_type;
 
@@ -1145,6 +1013,7 @@ struct intel_vbt_data {
 		u8 *data;
 		const u8 *sequence[MIPI_SEQ_MAX];
 		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+		enum drm_panel_orientation orientation;
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1229,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-	/* packed/y */
-	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
-	struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 	u8 enabled_slices; /* GEN11 has configurable 2 slices */
 };
 
@@ -1241,9 +1107,9 @@ struct skl_ddb_values {
 };
 
 struct skl_wm_level {
-	bool plane_en;
 	uint16_t plane_res_b;
 	uint8_t plane_res_l;
+	bool plane_en;
 };
 
 /* Stores plane specific WM parameters */
@@ -1324,20 +1190,6 @@ struct i915_frontbuffer_tracking {
 	unsigned flip_bits;
 };
 
-struct i915_wa_reg {
-	u32 addr;
-	u32 value;
-	/* bitmask representing WA bits */
-	u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
-	struct i915_wa_reg reg[I915_MAX_WA_REGS];
-	u32 count;
-};
-
 struct i915_virtual_gpu {
 	bool active;
 	u32 caps;
@@ -1521,30 +1373,12 @@ struct i915_oa_ops {
 	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
 
 	/**
-	 * @init_oa_buffer: Resets the head and tail pointers of the
-	 * circular buffer for periodic OA reports.
-	 *
-	 * Called when first opening a stream for OA metrics, but also may be
-	 * called in response to an OA buffer overflow or other error
-	 * condition.
-	 *
-	 * Note it may be necessary to clear the full OA buffer here as part of
-	 * maintaining the invariable that new reports must be written to
-	 * zeroed memory for us to be able to reliable detect if an expected
-	 * report has not yet landed in memory.  (At least on Haswell the OA
-	 * buffer tail pointer is not synchronized with reports being visible
-	 * to the CPU)
-	 */
-	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
-	/**
 	 * @enable_metric_set: Selects and applies any MUX configuration to set
 	 * up the Boolean and Custom (B/C) counters that are part of the
 	 * counter reports being sampled. May apply system constraints such as
 	 * disabling EU clock gating as required.
 	 */
-	int (*enable_metric_set)(struct drm_i915_private *dev_priv,
-				 const struct i915_oa_config *oa_config);
+	int (*enable_metric_set)(struct i915_perf_stream *stream);
 
 	/**
 	 * @disable_metric_set: Remove system constraints associated with using
@@ -1555,12 +1389,12 @@ struct i915_oa_ops {
 	/**
 	 * @oa_enable: Enable periodic sampling
 	 */
-	void (*oa_enable)(struct drm_i915_private *dev_priv);
+	void (*oa_enable)(struct i915_perf_stream *stream);
 
 	/**
 	 * @oa_disable: Disable periodic sampling
 	 */
-	void (*oa_disable)(struct drm_i915_private *dev_priv);
+	void (*oa_disable)(struct i915_perf_stream *stream);
 
 	/**
 	 * @read: Copy data from the circular OA buffer into a given userspace
@@ -1805,7 +1639,6 @@ struct drm_i915_private {
 
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-	struct i915_workarounds workarounds;
 	struct i915_wa_list gt_wa_list;
 
 	struct i915_frontbuffer_tracking fb_tracking;
@@ -2326,6 +2159,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
 	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
 
+bool i915_sg_trim(struct sg_table *orig_st);
+
 static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
 {
 	unsigned int page_sizes;
@@ -2371,20 +2206,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define REVID_FOREVER		0xff
 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
 
-#define GEN_FOREVER (0)
-
 #define INTEL_GEN_MASK(s, e) ( \
 	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
 	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
-	GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
-		(s) != GEN_FOREVER ? (s) - 1 : 0) \
-)
+	GENMASK((e) - 1, (s) - 1))
 
-/*
- * Returns true if Gen is in inclusive range [Start, End].
- *
- * Use GEN_FOREVER for unbound start and or end.
- */
+/* Returns true if Gen is in inclusive range [Start, End] */
 #define IS_GEN(dev_priv, s, e) \
 	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
 
@@ -2465,6 +2292,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
+				 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
@@ -2596,17 +2425,22 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
-#define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define HAS_PPGTT(dev_priv) \
+	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(dev_priv) \
+	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define HAS_FULL_48BIT_PPGTT(dev_priv)	\
+	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
+
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
 	GEM_BUG_ON((sizes) == 0); \
 	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		((dev_priv)->info.overlay_needs_physical)
+		((dev_priv)->info.display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2627,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
 					 !(IS_I915G(dev_priv) || \
 					 IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv)	((dev_priv)->info.display.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv)	((dev_priv)->info.display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv)		 ((dev_priv)->info.display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv)		 ((dev_priv)->info.display.has_psr)
 
 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv)	((dev_priv)->info.display.has_csr)
 
 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv)		 ((dev_priv)->info.display.has_ipc)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
@@ -2712,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
@@ -2724,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3
 
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
 #include "i915_trace.h"
 
 static inline bool intel_vtd_active(void)
@@ -2746,9 +2582,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
 	return IS_BROXTON(dev_priv) && intel_vtd_active();
 }
 
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-				int enable_ppgtt);
-
 /* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3233,7 +3066,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3465,6 +3298,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 				     enum port port);
 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
 				enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
 
 /* intel_acpi.c */
 #ifdef CONFIG_ACPI
@@ -3486,8 +3320,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern int intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
 				       bool state);
 extern void intel_display_resume(struct drm_device *dev);
@@ -3500,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
 				       bool interactive);
 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
 
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
@@ -3587,6 +3422,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
 			 const struct intel_crtc_state *old_crtc_state);
 
+/* intel_combo_phy.c */
+void icl_combo_phys_init(struct drm_i915_private *dev_priv);
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 0000000..591dd89
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_FIXED_H_
+#define _I915_FIXED_H_
+
+typedef struct {
+	u32 val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
+
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+	return val.val == 0;
+}
+
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
+{
+	uint_fixed_16_16_t fp = { .val = val << 16 };
+
+	WARN_ON(val > U16_MAX);
+
+	return fp;
+}
+
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+	return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
+{
+	return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
+					     uint_fixed_16_16_t min2)
+{
+	uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
+
+	return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
+					     uint_fixed_16_16_t max2)
+{
+	uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
+
+	return max;
+}
+
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
+{
+	uint_fixed_16_16_t fp = { .val = (u32)val };
+
+	WARN_ON(val > U32_MAX);
+
+	return fp;
+}
+
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+				       uint_fixed_16_16_t d)
+{
+	return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+	u64 tmp;
+
+	tmp = (u64)val * mul.val;
+	tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
+	WARN_ON(tmp > U32_MAX);
+
+	return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+					     uint_fixed_16_16_t mul)
+{
+	u64 tmp;
+
+	tmp = (u64)val.val * mul.val;
+	tmp = tmp >> 16;
+
+	return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
+{
+	u64 tmp;
+
+	tmp = (u64)val << 16;
+	tmp = DIV_ROUND_UP_ULL(tmp, d);
+
+	return clamp_u64_to_fixed16(tmp);
+}
+
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
+{
+	u64 tmp;
+
+	tmp = (u64)val << 16;
+	tmp = DIV_ROUND_UP_ULL(tmp, d.val);
+	WARN_ON(tmp > U32_MAX);
+
+	return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+	u64 tmp;
+
+	tmp = (u64)val * mul.val;
+
+	return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+					     uint_fixed_16_16_t add2)
+{
+	u64 tmp;
+
+	tmp = (u64)add1.val + add2.val;
+
+	return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+						 u32 add2)
+{
+	uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
+	u64 tmp;
+
+	tmp = (u64)add1.val + tmp_add2.val;
+
+	return clamp_u64_to_fixed16(tmp);
+}
+
+#endif /* _I915_FIXED_H_ */
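
The new header above is self-contained, so a brief usage sketch may help; the numbers are illustrative only and do not come from the driver:

	/* Sketch: scale a 1920-wide plane by a 1.5 downscale ratio in 16.16 form. */
	uint_fixed_16_16_t ratio = div_fixed16(3, 2);       /* 0x18000 == 1.5 */
	u32 scaled = mul_round_up_u32_fixed16(1920, ratio); /* == 2880 */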
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6ae9a60..d36a975 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 */
 	err = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_PRIORITY |
 				   (write_domain ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
+/*
+ * Move pages to the appropriate LRU and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+	check_move_unevictable_pages(pvec);
+	__pagevec_release(pvec);
+	cond_resched();
+}
+
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 			      struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
+	struct pagevec pvec;
 	struct page *page;
 
 	__i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj, pages);
 
+	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+	pagevec_init(&pvec);
 	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
 			set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 		if (obj->mm.madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
-		put_page(page);
+		if (!pagevec_add(&pvec, page))
+			check_release_pagevec(&pvec);
 	}
+	if (pagevec_count(&pvec))
+		check_release_pagevec(&pvec);
 	obj->mm.dirty = false;
 
 	sg_free_table(pages);
@@ -2483,7 +2502,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	mutex_unlock(&obj->mm.lock);
 }
 
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
 	struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	unsigned int max_segment = i915_sg_segment_size();
 	unsigned int sg_page_sizes;
+	struct pagevec pvec;
 	gfp_t noreclaim;
 	int ret;
 
@@ -2559,6 +2579,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * Fail silently without starting the shrinker
 	 */
 	mapping = obj->base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
 	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
 	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
 
@@ -2573,6 +2594,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		gfp_t gfp = noreclaim;
 
 		do {
+			cond_resched();
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 			if (likely(!IS_ERR(page)))
 				break;
@@ -2583,7 +2605,6 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			}
 
 			i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
-			cond_resched();
 
 			/*
 			 * We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_sg:
 	sg_mark_end(sg);
 err_pages:
-	for_each_sgt_page(page, sgt_iter, st)
-		put_page(page);
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	for_each_sgt_page(page, sgt_iter, st) {
+		if (!pagevec_add(&pvec, page))
+			check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		check_release_pagevec(&pvec);
 	sg_free_table(st);
 	kfree(st);
 
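
Taken together, the shmem hunks in this file form one lifecycle: pages are hidden from reclaim while the GPU owns them, and on release they are rescued back to the proper LRU and put in pagevec-sized batches rather than one put_page() at a time. A condensed sketch, using only names from this patch:

	mapping_set_unevictable(mapping);   /* get_pages: reclaim skips them */
	/* ... the object is in use by the GPU ... */
	mapping_clear_unevictable(mapping); /* put_pages */
	for_each_sgt_page(page, sgt_iter, pages)
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec); /* LRU fixup + batched put */
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);         /* flush the remainder */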
@@ -3282,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
-	dma_fence_set_error(&request->fence, -EIO);
-
-	i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
 	unsigned long flags;
 
 	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3327,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		i915_gem_reset_prepare_engine(engine);
 
-		engine->submit_request = nop_submit_request;
-		engine->schedule = NULL;
-	}
-	i915->caps.scheduler = 0;
-
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (INTEL_GEN(i915) >= 5)
 		intel_gpu_reset(i915, ALL_ENGINES);
 
-	/*
-	 * Make sure no one is running the old callback before we proceed with
-	 * cancelling requests and resetting the completion tracking. Otherwise
-	 * we might submit a request to the hardware which never completes.
-	 */
-	synchronize_rcu();
-
 	for_each_engine(engine, i915, id) {
-		/* Mark all executing requests as skipped */
-		engine->cancel_requests(engine);
-
-		/*
-		 * Only once we've force-cancelled all in-flight requests can we
-		 * start to complete all requests.
-		 */
-		engine->submit_request = nop_complete_submit_request;
+		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
-	 * in nop_complete_submit_request.
+	 * in nop_submit_request.
 	 */
 	synchronize_rcu();
 
+	/* Mark all executing requests as skipped */
+	for_each_engine(engine, i915, id)
+		engine->cancel_requests(engine);
+
 	for_each_engine(engine, i915, id) {
-		unsigned long flags;
-
-		/*
-		 * Mark all pending requests as complete so that any concurrent
-		 * (lockless) lookup doesn't try and wait upon the request as we
-		 * reset it.
-		 */
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		intel_engine_init_global_seqno(engine,
-					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
 		i915_gem_reset_finish_engine(engine);
+		intel_engine_wakeup(engine);
 	}
 
 out:
@@ -3530,6 +3523,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
 	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
 	struct drm_i915_private *i915 = s->i915;
 
+	destroy_rcu_head(&s->rcu);
+
 	if (same_epoch(i915, s->epoch)) {
 		INIT_WORK(&s->work, __sleep_work);
 		queue_work(i915->wq, &s->work);
@@ -3646,6 +3641,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (same_epoch(dev_priv, epoch)) {
 		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
 		if (s) {
+			init_rcu_head(&s->rcu);
 			s->i915 = dev_priv;
 			s->epoch = epoch;
 			call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3739,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	start = ktime_get();
 
 	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_PRIORITY |
+				   I915_WAIT_ALL,
 				   to_wait_timeout(args->timeout_ns),
 				   to_rps_client(file));
 
@@ -4710,6 +4708,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
+	init_rcu_head(&obj->rcu);
+
 	obj->ops = ops;
 
 	reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +4977,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
 	/*
+	 * We reuse obj->rcu for the freed list, so we had better not treat
+	 * it like an rcu_head from this point forward. And we expect all
+	 * objects to be freed via this path.
+	 */
+	destroy_rcu_head(&obj->rcu);
+
+	/*
 	 * Since we require blocking on struct_mutex to unbind the freed
 	 * object from the GPU before releasing resources back to the
 	 * system, we can not do that directly from the RCU callback (which may
@@ -5293,19 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
 			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	if (HAS_PCH_NOP(dev_priv)) {
-		if (IS_IVYBRIDGE(dev_priv)) {
-			u32 temp = I915_READ(GEN7_MSG_CTL);
-			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-			I915_WRITE(GEN7_MSG_CTL, temp);
-		} else if (INTEL_GEN(dev_priv) >= 7) {
-			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
-			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
-			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
-		}
-	}
-
+	/* Apply the GT workarounds... */
 	intel_gt_apply_workarounds(dev_priv);
+	/* ...and determine whether they are sticking. */
+	intel_gt_verify_workarounds(dev_priv, "init");
 
 	i915_gem_init_swizzling(dev_priv);
 
@@ -6001,7 +5999,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	 * the bits.
 	 */
 	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
-		     sizeof(atomic_t) * BITS_PER_BYTE);
+		     BITS_PER_TYPE(atomic_t));
 
 	if (old) {
 		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6..b0e4b97 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
 #define GEM_DEBUG_DECL(var) var
 #define GEM_DEBUG_EXEC(expr) expr
 #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
 
 #else
 
 #define GEM_SHOW_DEBUG() (0)
 
 #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
-#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
 
 #define GEM_DEBUG_DECL(var)
 #define GEM_DEBUG_EXEC(expr) do { } while (0)
 #define GEM_DEBUG_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
 #endif
 
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
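
The point of the GEM_WARN_ON() change is that it now evaluates its argument and returns the result even when GEM debugging is disabled (the old definition compiled to a constant 0 and never ran the check), so it can guard a bail-out path; a minimal sketch:

	if (GEM_WARN_ON(bytes > size)) /* still checked with debugging off */
		return -EINVAL;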
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593..371c070 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->sched.priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 		struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (USES_FULL_PPGTT(dev_priv)) {
+	if (HAS_FULL_PPGTT(dev_priv)) {
 		struct i915_hw_ppgtt *ppgtt;
 
 		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (ret)
 		return ERR_PTR(ret);
 
-	ctx = __create_hw_context(to_i915(dev), NULL);
+	ctx = i915_gem_create_context(to_i915(dev), NULL);
 	if (IS_ERR(ctx))
 		goto out;
 
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->sched.priority = prio;
+	ctx->sched.priority = I915_USER_PRIORITY(prio);
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
-	int ret;
 
 	/* Reassure ourselves we are only called once */
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
-	ret = intel_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
+	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
 	init_contexts(dev_priv);
 
 	/* lowest priority; idle task */
@@ -879,7 +875,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->sched.priority;
+		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
 		break;
 	default:
 		ret = -EINVAL;
@@ -948,7 +944,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->sched.priority = priority;
+				ctx->sched.priority =
+					I915_USER_PRIORITY(priority);
 		}
 		break;
 
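
The context-priority hunks in this file all follow one packing scheme: the user value is shifted up by I915_USER_PRIORITY_SHIFT on the way in and back down on the way out, which (per this series) leaves the low bits free for internal scheduler use. Condensed round-trip, taken from the hunks above:

	ctx->sched.priority = I915_USER_PRIORITY(prio);                /* setparam */
	args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; /* getparam */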
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6..f6d870b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
 	/** engine: per-engine logical HW state */
 	struct intel_context {
 		struct i915_gem_context *gem_context;
+		struct intel_engine_cs *active;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1aaccbe..786d719 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2186,7 +2186,7 @@ signal_fence_array(struct i915_execbuffer *eb,
 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
 			continue;
 
-		drm_syncobj_replace_fence(syncobj, 0, fence);
+		drm_syncobj_replace_fence(syncobj, fence);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 07999fe..add1fe7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
 	i915->ggtt.invalidate(i915);
 }
 
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-			       	int enable_ppgtt)
-{
-	bool has_full_ppgtt;
-	bool has_full_48bit_ppgtt;
-
-	if (!dev_priv->info.has_aliasing_ppgtt)
-		return 0;
-
-	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
-	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
-
-	if (intel_vgpu_active(dev_priv)) {
-		/* GVT-g has no support for 32bit ppgtt */
-		has_full_ppgtt = false;
-		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
-	}
-
-	/*
-	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
-	 * execlists, the sole mechanism available to submit work.
-	 */
-	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
-		return 0;
-
-	if (enable_ppgtt == 1)
-		return 1;
-
-	if (enable_ppgtt == 2 && has_full_ppgtt)
-		return 2;
-
-	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
-		return 3;
-
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
-		DRM_INFO("Disabling PPGTT because VT-d is on\n");
-		return 0;
-	}
-
-	if (has_full_48bit_ppgtt)
-		return 3;
-
-	if (has_full_ppgtt)
-		return 2;
-
-	return 1;
-}
-
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 }
 
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 #define gen8_pdpe_encode gen8_pde_encode
 #define gen8_pml4e_encode gen8_pde_encode
 
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * region, including any PTEs which happen to point to scratch.
 	 *
 	 * This is only relevant for the 48b PPGTT where we support
-	 * huge-gtt-pages, see also i915_vma_insert().
-	 *
-	 * TODO: we should really consider write-protecting the scratch-page and
-	 * sharing between ppgtt
+	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+	 * scratch (read-only) between all vm, we create one 64k scratch page
+	 * for all.
 	 */
 	size = I915_GTT_PAGE_SIZE_4K;
 	if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill_px(vm, pt,
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+	fill_px(vm, pt, vm->scratch_pte);
 }
 
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+	fill32_px(vm, pt, vm->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 /* Removes entries from a single page table, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries.
  */
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 				struct i915_page_table *pt,
 				u64 start, u64 length)
 {
 	unsigned int num_entries = gen8_pte_count(start, length);
 	unsigned int pte = gen8_pte_index(start);
 	unsigned int pte_end = pte + num_entries;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t *vaddr;
 
 	GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 
 	vaddr = kmap_atomic_px(pt);
 	while (pte < pte_end)
-		vaddr[pte++] = scratch_pte;
+		vaddr[pte++] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 
 	return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
 				u16 i;
 
-				encode = pte_encode | vma->vm->scratch_page.daddr;
+				encode = vma->vm->scratch_pte;
 				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
 
 				for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 {
 	int ret;
 
+	/*
+	 * If everybody agrees not to write into the scratch page,
+	 * we can reuse it for all vm, keeping contexts and processes separate.
+	 */
+	if (vm->has_read_only &&
+	    vm->i915->kernel_context &&
+	    vm->i915->kernel_context->ppgtt) {
+		struct i915_address_space *clone =
+			&vm->i915->kernel_context->ppgtt->vm;
+
+		GEM_BUG_ON(!clone->has_read_only);
+
+		vm->scratch_page.order = clone->scratch_page.order;
+		vm->scratch_pte = clone->scratch_pte;
+		vm->scratch_pt  = clone->scratch_pt;
+		vm->scratch_pd  = clone->scratch_pd;
+		vm->scratch_pdp = clone->scratch_pdp;
+		return 0;
+	}
+
 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
 	if (ret)
 		return ret;
 
+	vm->scratch_pte =
+		gen8_pte_encode(vm->scratch_page.daddr,
+				I915_CACHE_LLC,
+				PTE_READ_ONLY);
+
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
 		ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 
 static void gen8_free_scratch(struct i915_address_space *vm)
 {
+	if (!vm->scratch_page.daddr)
+		return;
+
 	if (use_4lvl(vm))
 		free_pdp(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
 	struct i915_address_space *vm = &ppgtt->vm;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	u64 start = 0, length = ppgtt->vm.total;
 
 	if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.dma = &i915->drm.pdev->dev;
 
-	ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
+	ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
 		1ULL << 48 :
 		1ULL << 32;
 
-	/*
-	 * From bdw, there is support for read-only pages in the PPGTT.
-	 *
-	 * XXX GVT is not honouring the lack of RW in the PTE bits.
-	 */
-	ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+	/* From bdw, there is support for read-only pages in the PPGTT. */
+	ppgtt->vm.has_read_only = true;
 
 	i915_address_space_init(&ppgtt->vm, i915);
 
@@ -1721,7 +1691,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = base->vm.scratch_pte;
 	struct i915_page_table *pt;
 	u32 pte, pde;
 
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
 		  ppgtt->pd_addr + pde);
 }
 
-static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, dev_priv, id) {
-		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
-				 GEN8_GFX_PPGTT_48B : 0;
-		I915_WRITE(RING_MODE_GEN7(engine),
-			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
-	}
-}
-
 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
 	ecochk = I915_READ(GAM_ECOCHK);
 	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
-	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 }
 
 /* PPGTT support for Sandybridge/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned int pde = first_entry / GEN6_PTES;
 	unsigned int pte = first_entry % GEN6_PTES;
 	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = vm->scratch_pte;
 
 	while (num_entries) {
 		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 			if (IS_ERR(pt))
 				goto unwind_out;
 
-			gen6_initialize_pt(ppgtt, pt);
+			gen6_initialize_pt(vm, pt);
 			ppgtt->base.pd.page_table[pde] = pt;
 
 			if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ppgtt->scratch_pte =
-		vm->pte_encode(vm->scratch_page.daddr,
-			       I915_CACHE_NONE, PTE_READ_ONLY);
+	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+					 I915_CACHE_NONE,
+					 PTE_READ_ONLY);
 
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 		return PTR_ERR(vm->scratch_pt);
 	}
 
-	gen6_initialize_pt(ppgtt, vm->scratch_pt);
+	gen6_initialize_pt(vm, vm->scratch_pt);
 	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
 		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 {
 	gtt_write_workarounds(dev_priv);
 
-	/* In the case of execlists, PPGTT is enabled by the context descriptor
-	 * and the PDPs are contained within the context itself.  We don't
-	 * need to do anything here. */
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
-		return 0;
-
-	if (!USES_PPGTT(dev_priv))
-		return 0;
-
 	if (IS_GEN6(dev_priv))
 		gen6_ppgtt_enable(dev_priv);
 	else if (IS_GEN7(dev_priv))
 		gen7_ppgtt_enable(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 8)
-		gen8_ppgtt_enable(dev_priv);
-	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
 
 	return 0;
 }
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
 	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	gen8_pte_t __iomem *gtt_base =
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, 0);
+	scratch_pte = vm->scratch_pte;
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	/* And finally clear the reserved guard page */
 	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
-	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
+	if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
 		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
 		if (ret)
 			goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return ret;
 	}
 
+	ggtt->vm.scratch_pte =
+		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+				    I915_CACHE_NONE, 0);
+
 	return 0;
 }
 
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
 	ppat->match = bdw_private_pat_match;
 	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
 
-	if (!USES_PPGTT(ppat->i915)) {
+	if (!HAS_PPGTT(ppat->i915)) {
 		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
 		 * so RTL will always use the value corresponding to
 		 * pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.cleanup = gen6_gmch_remove;
 	ggtt->vm.insert_page = gen8_ggtt_insert_page;
 	ggtt->vm.clear_range = nop_clear_range;
-	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+	if (intel_scanout_needs_vtd_wa(dev_priv))
 		ggtt->vm.clear_range = gen8_ggtt_clear_range;
 
 	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3427,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
+	ggtt->vm.pte_encode = gen8_pte_encode;
+
 	setup_private_pat(dev_priv);
 
 	return ggtt_probe_common(ggtt, size);
@@ -3614,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	/* Only VLV supports read-only GGTT mappings */
 	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
 
-	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
+	if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -3716,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 }
 
 static struct scatterlist *
-rotate_pages(const dma_addr_t *in, unsigned int offset,
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
 	     unsigned int width, unsigned int height,
 	     unsigned int stride,
 	     struct sg_table *st, struct scatterlist *sg)
@@ -3725,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 	unsigned int src_idx;
 
 	for (column = 0; column < width; column++) {
-		src_idx = stride * (height - 1) + column;
+		src_idx = stride * (height - 1) + column + offset;
 		for (row = 0; row < height; row++) {
 			st->nents++;
 			/* We don't need the pages, but need to initialize
@@ -3733,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 			 * The only things we need are the DMA addresses.
 			 */
 			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
-			sg_dma_address(sg) = in[offset + src_idx];
+			sg_dma_address(sg) =
+				i915_gem_object_get_dma_address(obj, src_idx);
 			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
 			sg = sg_next(sg);
 			src_idx -= stride;
@@ -3747,22 +3697,11 @@ static noinline struct sg_table *
 intel_rotate_pages(struct intel_rotation_info *rot_info,
 		   struct drm_i915_gem_object *obj)
 {
-	const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
 	unsigned int size = intel_rotation_info_size(rot_info);
-	struct sgt_iter sgt_iter;
-	dma_addr_t dma_addr;
-	unsigned long i;
-	dma_addr_t *page_addr_list;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	int ret = -ENOMEM;
-
-	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = kvmalloc_array(n_pages,
-					sizeof(dma_addr_t),
-					GFP_KERNEL);
-	if (!page_addr_list)
-		return ERR_PTR(ret);
+	int i;
 
 	/* Allocate target SG list. */
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3773,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 	if (ret)
 		goto err_sg_alloc;
 
-	/* Populate source page list from the object. */
-	i = 0;
-	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
-		page_addr_list[i++] = dma_addr;
-
-	GEM_BUG_ON(i != n_pages);
 	st->nents = 0;
 	sg = st->sgl;
 
 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
-		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+		sg = rotate_pages(obj, rot_info->plane[i].offset,
 				  rot_info->plane[i].width, rot_info->plane[i].height,
 				  rot_info->plane[i].stride, st, sg);
 	}
 
-	kvfree(page_addr_list);
-
 	return st;
 
 err_sg_alloc:
 	kfree(st);
 err_st_alloc:
-	kvfree(page_addr_list);
 
 	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
 			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3840,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 		count -= len >> PAGE_SHIFT;
 		if (count == 0) {
 			sg_mark_end(sg);
+			i915_sg_trim(st); /* Drop any unused tail entries. */
+
 			return st;
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2803929..4874da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
 
 	struct mutex mutex; /* protects vma and our lists */
 
+	u64 scratch_pte;
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
 	/* Some systems support read-only mappings for GGTT and/or PPGTT */
 	bool has_read_only:1;
 
-	/* FIXME: Need a more generic return type */
-	gen6_pte_t (*pte_encode)(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags); /* Create a valid PTE */
-	/* flags for pte_encode */
+	u64 (*pte_encode)(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags); /* Create a valid PTE */
 #define PTE_READ_ONLY	(1<<0)
+
 	int (*allocate_va_range)(struct i915_address_space *vm,
 				 u64 start, u64 length);
 	void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
 
 	struct i915_vma *vma;
 	gen6_pte_t __iomem *pd_addr;
-	gen6_pte_t scratch_pte;
 
 	unsigned int pin_count;
 	bool scan_for_unused_pt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index db4128d..0746512 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
  *
  */
 
-#include <generated/utsrelease.h>
-#include <linux/stop_machine.h>
-#include <linux/zlib.h>
-#include <drm/drm_print.h>
 #include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
+#include <linux/stop_machine.h>
+#include <linux/utsname.h>
+#include <linux/zlib.h>
+
+#include <drm/drm_print.h>
 
 #include "i915_gpu_error.h"
 #include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
 	return purgeable ? " purgeable" : "";
 }
 
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+			 void *addr, unsigned int len, loff_t it)
 {
-
-	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-		e->err = -ENOSPC;
-		return false;
-	}
-
-	if (e->bytes == e->size - 1 || e->err)
-		return false;
-
-	return true;
+	sg->page_link = (unsigned long)virt_to_page(addr);
+	sg->offset = offset_in_page(addr);
+	sg->length = len;
+	sg->dma_address = it;
 }
 
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-			      unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
 {
-	if (e->pos + len <= e->start) {
-		e->pos += len;
+	if (!len)
 		return false;
+
+	if (e->bytes + len + 1 <= e->size)
+		return true;
+
+	if (e->bytes) {
+		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+		e->iter += e->bytes;
+		e->buf = NULL;
+		e->bytes = 0;
 	}
 
-	/* First vsnprintf needs to fit in its entirety for memmove */
-	if (len >= e->size) {
-		e->err = -EIO;
-		return false;
-	}
+	if (e->cur == e->end) {
+		struct scatterlist *sgl;
 
-	return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-				 unsigned len)
-{
-	/* If this is first printf in this window, adjust it so that
-	 * start position matches start of the buffer
-	 */
-
-	if (e->pos < e->start) {
-		const size_t off = e->start - e->pos;
-
-		/* Should not happen but be paranoid */
-		if (off > len || e->bytes) {
-			e->err = -EIO;
-			return;
+		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+		if (!sgl) {
+			e->err = -ENOMEM;
+			return false;
 		}
 
-		memmove(e->buf, e->buf + off, len - off);
-		e->bytes = len - off;
-		e->pos = e->start;
-		return;
+		if (e->cur) {
+			e->cur->offset = 0;
+			e->cur->length = 0;
+			e->cur->page_link =
+				(unsigned long)sgl | SG_CHAIN;
+		} else {
+			e->sgl = sgl;
+		}
+
+		e->cur = sgl;
+		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
 	}
 
-	e->bytes += len;
-	e->pos += len;
+	e->size = ALIGN(len + 1, SZ_64K);
+	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!e->buf) {
+		e->size = PAGE_ALIGN(len + 1);
+		e->buf = kmalloc(e->size, GFP_KERNEL);
+	}
+	if (!e->buf) {
+		e->err = -ENOMEM;
+		return false;
+	}
+
+	return true;
 }
 
 __printf(2, 0)
 static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-			       const char *f, va_list args)
+			       const char *fmt, va_list args)
 {
-	unsigned len;
+	va_list ap;
+	int len;
 
-	if (!__i915_error_ok(e))
+	if (e->err)
 		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		va_list tmp;
-
-		va_copy(tmp, args);
-		len = vsnprintf(NULL, 0, f, tmp);
-		va_end(tmp);
-
-		if (!__i915_error_seek(e, len))
-			return;
+	va_copy(ap, args);
+	len = vsnprintf(NULL, 0, fmt, ap);
+	va_end(ap);
+	if (len <= 0) {
+		e->err = len;
+		return;
 	}
 
-	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	if (!__i915_error_grow(e, len))
+		return;
 
-	__i915_error_advance(e, len);
+	GEM_BUG_ON(e->bytes >= e->size);
+	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+	if (len < 0) {
+		e->err = len;
+		return;
+	}
+	e->bytes += len;
 }
 
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-			    const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
 {
 	unsigned len;
 
-	if (!__i915_error_ok(e))
+	if (e->err || !str)
 		return;
 
 	len = strlen(str);
+	if (!__i915_error_grow(e, len))
+		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		if (!__i915_error_seek(e, len))
-			return;
-	}
-
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	GEM_BUG_ON(e->bytes + len > e->size);
 	memcpy(e->buf + e->bytes, str, len);
-
-	__i915_error_advance(e, len);
+	e->bytes += len;
 }
 
 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
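
The rewritten print path is measure-then-emit over a growable, page-backed buffer; a condensed sketch of i915_error_vprintf() above:

	len = vsnprintf(NULL, 0, fmt, ap);  /* measure with a copied va_list */
	if (!__i915_error_grow(e, len))     /* ensure len + 1 bytes, chaining
					     * filled pages into the sg list */
		return;
	e->bytes += vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);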
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
 
 		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
+
+		touch_nmi_watchdog();
 	} while (zstream->avail_in);
 
 	/* Fallback to uncompressed if we increase size? */
@@ -512,7 +515,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 			err_printf(m, "  SYNC_2: 0x%08x\n",
 				   ee->semaphore_mboxes[2]);
 	}
-	if (USES_PPGTT(m->i915)) {
+	if (HAS_PPGTT(m->i915)) {
 		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
 
 		if (INTEL_GEN(m->i915) >= 8) {
@@ -635,25 +638,33 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
 	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
 }
 
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
-			    const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
 {
-	struct drm_i915_private *dev_priv = m->i915;
+	while (sgl) {
+		struct scatterlist *sg;
+
+		for (sg = sgl; !sg_is_chain(sg); sg++) {
+			kfree(sg_virt(sg));
+			if (sg_is_last(sg))
+				break;
+		}
+
+		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+		free_page((unsigned long)sgl);
+		sgl = sg;
+	}
+}
+
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+			       struct i915_gpu_state *error)
+{
 	struct drm_i915_error_object *obj;
 	struct timespec64 ts;
 	int i, j;
 
-	if (!error) {
-		err_printf(m, "No error state collected\n");
-		return 0;
-	}
-
-	if (IS_ERR(error))
-		return PTR_ERR(error);
-
 	if (*error->error_msg)
 		err_printf(m, "%s\n", error->error_msg);
-	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_printf(m, "Kernel: %s\n", init_utsname()->release);
 	ts = ktime_to_timespec64(error->time);
 	err_printf(m, "Time: %lld s %ld us\n",
 		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
-	err_print_pciid(m, error->i915);
+	err_print_pciid(m, m->i915);
 
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
-	if (HAS_CSR(dev_priv)) {
-		struct intel_csr *csr = &dev_priv->csr;
+	if (HAS_CSR(m->i915)) {
+		struct intel_csr *csr = &m->i915->csr;
 
 		err_printf(m, "DMC loaded: %s\n",
 			   yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
-	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+	err_printf(m, "Missed interrupts: 0x%08lx\n",
+		   m->i915->gpu_error.missed_irq_rings);
 
 	for (i = 0; i < error->nfence; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
-	if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(m->i915) >= 6) {
 		err_printf(m, "ERROR: 0x%08x\n", error->error);
 
-		if (INTEL_GEN(dev_priv) >= 8)
+		if (INTEL_GEN(m->i915) >= 8)
 			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
 				   error->fault_data1, error->fault_data0);
 
 		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN7(m->i915))
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 			len += scnprintf(buf + len, sizeof(buf), "%s%s",
 					 first ? "" : ", ",
-					 dev_priv->engine[j]->name);
+					 m->i915->engine[j]->name);
 			first = 0;
 		}
 		scnprintf(buf + len, sizeof(buf), ")");
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		obj = ee->batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->engine[i]->name);
+			err_puts(m, m->i915->engine[i]->name);
 			if (ee->context.pid)
 				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
 					   ee->context.comm,
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, dev_priv->engine[i], NULL, obj);
+			print_error_obj(m, m->i915->engine[i], NULL, obj);
 		}
 
 		for (j = 0; j < ee->user_bo_count; j++)
-			print_error_obj(m, dev_priv->engine[i],
+			print_error_obj(m, m->i915->engine[i],
 					"user", ee->user_bo[j]);
 
 		if (ee->num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->engine[i]->name,
+				   m->i915->engine[i]->name,
 				   ee->num_requests);
 			for (j = 0; j < ee->num_requests; j++)
 				error_print_request(m, " ",
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if (IS_ERR(ee->waiters)) {
 			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
-				   dev_priv->engine[i]->name);
+				   m->i915->engine[i]->name);
 		} else if (ee->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
-				   dev_priv->engine[i]->name,
+				   m->i915->engine[i]->name,
 				   ee->num_waiters);
 			for (j = 0; j < ee->num_waiters; j++) {
 				err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"ringbuffer", ee->ringbuffer);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"HW Status", ee->hws_page);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"HW context", ee->ctx);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"WA context", ee->wa_ctx);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"WA batchbuffer", ee->wa_batchbuffer);
 
-		print_error_obj(m, dev_priv->engine[i],
+		print_error_obj(m, m->i915->engine[i],
 				"NULL context", ee->default_state);
 	}
 
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_print_capabilities(m, &error->device_info, &error->driver_caps);
 	err_print_params(m, &error->params);
 	err_print_uc(m, &error->uc);
+}
 
-	if (m->bytes == 0 && m->err)
-		return m->err;
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+	struct drm_i915_error_state_buf m;
+
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	if (READ_ONCE(error->sgl))
+		return 0;
+
+	memset(&m, 0, sizeof(m));
+	m.i915 = error->i915;
+
+	__err_print_to_sgl(&m, error);
+
+	if (m.buf) {
+		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+		m.bytes = 0;
+		m.buf = NULL;
+	}
+	if (m.cur) {
+		GEM_BUG_ON(m.end < m.cur);
+		sg_mark_end(m.cur - 1);
+	}
+	GEM_BUG_ON(m.sgl && !m.cur);
+
+	if (m.err) {
+		err_free_sgl(m.sgl);
+		return m.err;
+	}
+
+	if (cmpxchg(&error->sgl, NULL, m.sgl))
+		err_free_sgl(m.sgl);
 
 	return 0;
 }
 
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
-			      struct drm_i915_private *i915,
-			      size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+				      char *buf, loff_t off, size_t rem)
 {
-	memset(ebuf, 0, sizeof(*ebuf));
-	ebuf->i915 = i915;
+	struct scatterlist *sg;
+	size_t count;
+	loff_t pos;
+	int err;
 
-	/* We need to have enough room to store any i915_error_state printf
-	 * so that we can move it to start position.
-	 */
-	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-	ebuf->buf = kmalloc(ebuf->size,
-				GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+	if (!error || !rem)
+		return 0;
 
-	if (ebuf->buf == NULL) {
-		ebuf->size = PAGE_SIZE;
-		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
-	}
+	err = err_print_to_sgl(error);
+	if (err)
+		return err;
 
-	if (ebuf->buf == NULL) {
-		ebuf->size = 128;
-		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
-	}
+	sg = READ_ONCE(error->fit);
+	if (!sg || off < sg->dma_address)
+		sg = error->sgl;
+	if (!sg)
+		return 0;
 
-	if (ebuf->buf == NULL)
-		return -ENOMEM;
+	pos = sg->dma_address;
+	count = 0;
+	do {
+		size_t len, start;
 
-	ebuf->start = pos;
+		if (sg_is_chain(sg)) {
+			sg = sg_chain_ptr(sg);
+			GEM_BUG_ON(sg_is_chain(sg));
+		}
 
-	return 0;
+		len = sg->length;
+		if (pos + len <= off) {
+			pos += len;
+			continue;
+		}
+
+		start = sg->offset;
+		if (pos < off) {
+			GEM_BUG_ON(off - pos > len);
+			len -= off - pos;
+			start += off - pos;
+			pos = off;
+		}
+
+		len = min(len, rem);
+		GEM_BUG_ON(!len || len > sg->length);
+
+		memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+		count += len;
+		pos += len;
+
+		buf += len;
+		rem -= len;
+		if (!rem) {
+			WRITE_ONCE(error->fit, sg);
+			break;
+		}
+	} while (!sg_is_last(sg++));
+
+	return count;
 }
 
 static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
 	cleanup_params(error);
 	cleanup_uc_state(error);
 
+	err_free_sgl(error->sgl);
 	kfree(error);
 }
 
@@ -1002,7 +1079,6 @@ i915_error_object_create(struct drm_i915_private *i915,
 	}
 
 	compress_fini(&compress, dst);
-	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
 }
 
@@ -1271,7 +1347,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
 						  engine);
 
-	if (USES_PPGTT(dev_priv)) {
+	if (HAS_PPGTT(dev_priv)) {
 		int i;
 
 		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1788,6 +1864,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
 	return epoch;
 }
 
+static void capture_finish(struct i915_gpu_state *error)
+{
+	struct i915_ggtt *ggtt = &error->i915->ggtt;
+	const u64 slot = ggtt->error_capture.start;
+
+	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
 static int capture(void *data)
 {
 	struct i915_gpu_state *error = data;
@@ -1812,6 +1896,7 @@ static int capture(void *data)
 
 	error->epoch = capture_find_epoch(error);
 
+	capture_finish(error);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 3ec89a5..ff2652b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
 	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
 	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
 	struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+	struct scatterlist *sgl, *fit;
 };
 
 struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
 
 struct drm_i915_error_state_buf {
 	struct drm_i915_private *i915;
-	unsigned int bytes;
-	unsigned int size;
+	struct scatterlist *sgl, *cur, *end;
+
+	char *buf;
+	size_t bytes;
+	size_t size;
+	loff_t iter;
+
 	int err;
-	u8 *buf;
-	loff_t start;
-	loff_t pos;
 };
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
-			    const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
-			      struct drm_i915_private *i915,
-			      size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
-	kfree(eb->buf);
-}
 
 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
 void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
 	return gpu;
 }
 
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+				      char *buf, loff_t offset, size_t count);
+
 void __i915_gpu_state_free(struct kref *kref);
 static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
 {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e24227..d447d7d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	return ret;
 }
 
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
+{
+	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
+
+	/*
+	 * Now with master disabled, get a sample of level indications
+	 * for this interrupt. Indications will be cleared on related acks.
+	 * New indications can and will light up during processing,
+	 * and will generate a new interrupt after enabling the master.
+	 */
+	return raw_reg_read(regs, GEN8_MASTER_IRQ);
+}
+
+static inline void gen8_master_intr_enable(void __iomem * const regs)
+{
+	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+}
+
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private *dev_priv = to_i915(arg);
+	void __iomem * const regs = dev_priv->regs;
 	u32 master_ctl;
 	u32 gt_iir[4];
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
 
-	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
-	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-	if (!master_ctl)
+	master_ctl = gen8_master_intr_disable(regs);
+	if (!master_ctl) {
+		gen8_master_intr_enable(regs);
 		return IRQ_NONE;
-
-	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+	}
 
 	/* Find, clear, then process each source of interrupt */
 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(dev_priv);
 	}
 
-	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	gen8_master_intr_enable(regs);
 
 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
 
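
The new helper pair codifies a disable/sample/handle/re-enable flow for the master interrupt, and the same shape is repeated for gen11 below. Condensed sketch, mirroring gen8_irq_handler() above:

	master_ctl = gen8_master_intr_disable(regs); /* mask master, latch sources */
	if (!master_ctl) {
		gen8_master_intr_enable(regs);       /* spurious: just unmask */
		return IRQ_NONE;
	}
	/* ... find, clear, then process each source ... */
	gen8_master_intr_enable(regs);               /* unmask; new events re-fire */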
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 		intel_opregion_asle_intr(dev_priv);
 }
 
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
+{
+	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+	/*
+	 * Now with master disabled, get a sample of level indications
+	 * for this interrupt. Indications will be cleared on related acks.
+	 * New indications can and will light up during processing,
+	 * and will generate a new interrupt after the master is re-enabled.
+	 */
+	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+}
+
+static inline void gen11_master_intr_enable(void __iomem * const regs)
+{
+	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+}
+
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 	if (!intel_irqs_enabled(i915))
 		return IRQ_NONE;
 
-	master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
-	master_ctl &= ~GEN11_MASTER_IRQ;
-	if (!master_ctl)
+	master_ctl = gen11_master_intr_disable(regs);
+	if (!master_ctl) {
+		gen11_master_intr_enable(regs);
 		return IRQ_NONE;
-
-	/* Disable interrupts. */
-	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+	}
 
 	/* Find, clear, then process each source of interrupt. */
 	gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 
 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
-	/* Acknowledge and enable interrupts. */
-	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+	gen11_master_intr_enable(regs);
 
 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;
 
-	I915_WRITE(GEN8_MASTER_IRQ, 0);
-	POSTING_READ(GEN8_MASTER_IRQ);
+	gen8_master_intr_disable(dev_priv->regs);
 
 	gen8_gt_irq_reset(dev_priv);
 
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
-	POSTING_READ(GEN11_GFX_MSTR_IRQ);
+	gen11_master_intr_disable(dev_priv->regs);
 
 	gen11_gt_irq_reset(dev_priv);
 
 	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
 
+	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
 	for_each_pipe(dev_priv, pipe)
 		if (intel_display_power_is_enabled(dev_priv,
 						   POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_postinstall(dev);
 
-	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-	POSTING_READ(GEN8_MASTER_IRQ);
+	gen8_master_intr_enable(dev_priv->regs);
 
 	return 0;
 }
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
-	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
-	POSTING_READ(GEN11_GFX_MSTR_IRQ);
+	gen11_master_intr_enable(dev_priv->regs);
 
 	return 0;
 }
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev_priv->display_irqs_enabled = false;
 
 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+	/* If we have MST support, we want to avoid doing short HPD IRQ storm
+	 * detection, as short HPD storms will occur as a natural part of
+	 * sideband messaging with MST.
+	 * On older platforms, however, IRQ storms can occur with both long and
+	 * short pulses, as seen on some G4x systems.
+	 */
+	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
 
 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
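The pattern the gen8 and gen11 handlers now share is disable-then-sample: writing 0 to the master control masks further interrupts, the read-back returns the level indications latched so far, and the spurious path must unmask before returning. A compressed sketch of that shape; reg_read(), reg_write(), handle_sources() and the register values are stand-ins declared here only so the sketch is self-contained:

/* Stand-ins for raw_reg_read()/raw_reg_write() on GEN8_MASTER_IRQ or
 * GEN11_GFX_MSTR_IRQ and for the per-source dispatch; placeholders only.
 */
unsigned int reg_read(void *regs, unsigned long reg);
void reg_write(void *regs, unsigned long reg, unsigned int val);
void handle_sources(void *regs, unsigned int master_ctl);

#define MASTER_IRQ	0x44200UL	/* placeholder offset */
#define MASTER_IRQ_EN	(1u << 31)	/* placeholder enable bit */

static int sketch_irq_handler(void *regs)
{
	unsigned int master_ctl;

	reg_write(regs, MASTER_IRQ, 0);		 /* mask further interrupts */
	master_ctl = reg_read(regs, MASTER_IRQ); /* sample latched levels */
	if (!master_ctl) {
		/* Spurious: nothing latched, so unmask and bail out. */
		reg_write(regs, MASTER_IRQ, MASTER_IRQ_EN);
		return 0;			 /* IRQ_NONE */
	}

	handle_sources(regs, master_ctl);	 /* find, clear, process */

	/*
	 * Unmask last: indications that lit up during processing raise a
	 * fresh interrupt instead of being lost while the master is off.
	 */
	reg_write(regs, MASTER_IRQ, MASTER_IRQ_EN);
	return 1;				 /* IRQ_HANDLED */
}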
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8..4acdb94 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16..0e667f1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304..a44195c 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963..679e92c 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae3..7f60d51 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268e..4d60255 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facd..a92c38e 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aa..0697f40 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb..71ec889 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b962249..0986eae 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c..5c23d88 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b1..e830a406 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db58..4bdda66 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113..06dedf9 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b9..cc6526f 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d0343..3d0c870 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a566792..baa5142 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24a..24eaa97 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a4..168e49a 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b80354..a55398a 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb667..6ffa553 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c..3ddd348 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd..7ce6ee8 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c..be62560 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7..086ca26 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2..650beb0 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2..b291a6e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd52..8dcf849 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ *
  * Autogenerated file by GPU Top : https://github.com/rib/gputop
  * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
  */
 
 #ifndef __I915_OA_SKLGT4_H__
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981..2e03565 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
 	"WARNING: Disabling this can cause system wide hangs. "
 	"(default: true)");
 
-i915_param_named_unsafe(enable_ppgtt, int, 0400,
-	"Override PPGTT usage. "
-	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-
 i915_param_named_unsafe(enable_psr, int, 0600,
 	"Enable PSR "
 	"(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
 i915_param_named(enable_dpcd_backlight, bool, 0600,
 	"Enable support for DPCD backlight control (default:false)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
 	"Enable support for Intel GVT-g graphics virtualization host (default: false)");
+#endif
 
 static __always_inline void _print_param(struct drm_printer *p,
 					 const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
 	else if (!__builtin_strcmp(type, "char *"))
 		drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
 	else
-		BUILD_BUG();
+		WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+			  type, name);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a2..7e56c51 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
 	param(int, vbt_sdvo_panel_type, -1) \
 	param(int, enable_dc, -1) \
 	param(int, enable_fbc, -1) \
-	param(int, enable_ppgtt, -1) \
 	param(int, enable_psr, -1) \
 	param(int, disable_power_well, -1) \
 	param(int, enable_ips, 1) \
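The _print_param() change above deserves a note: the dispatch compares the stringified C type of each parameter with __builtin_strcmp, which compilers fold at compile time for literal operands, so every call site reduces to one direct drm_printf(). An unmatched type used to trip BUILD_BUG(); now that enable_gvt can be compiled out, the table can no longer be proven exhaustive statically, hence the runtime WARN_ONCE. A reduced userspace model of the idiom, with invented names and plain printf standing in for drm_printf:

#include <stdbool.h>
#include <stdio.h>

/* Model of the stringified-type dispatch; __builtin_strcmp on literal
 * strings folds at compile time, leaving a single branch per call.
 */
static inline void print_param(const char *name, const char *type,
			       const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		printf("i915.%s=%s\n", name,
		       *(const bool *)x ? "yes" : "no");
	else if (!__builtin_strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
	else
		/* analogue of the new WARN_ONCE fallback */
		fprintf(stderr,
			"no printer defined for param type %s (i915.%s)\n",
			type, name);
}

int main(void)
{
	int enable_psr = -1;
	bool enable_hangcheck = true;

	print_param("enable_psr", "int", &enable_psr);
	print_param("enable_hangcheck", "bool", &enable_hangcheck);
	return 0;
}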
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9f..6350db5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -33,19 +33,30 @@
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
 
 #define GEN_DEFAULT_PIPEOFFSETS \
-	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET,	\
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = PIPE_C_OFFSET, \
+		[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+	}
 
 #define GEN_CHV_PIPEOFFSETS \
-	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-			  CHV_PIPE_C_OFFSET }, \
-	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   CHV_TRANSCODER_C_OFFSET, }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-			     CHV_PALETTE_C_OFFSET }
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET, \
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+	}
 
 #define CURSOR_OFFSETS \
 	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
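The macro rewrite above swaps positional aggregate initializers for designated ones keyed by transcoder, so an offset can no longer land in the wrong slot if the enum order ever changes, and transcoders a platform lacks simply read back as 0. A small self-contained illustration of those C semantics; the enum and offset values are invented for the example:

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
		  TRANSCODER_EDP, MAX_TRANSCODERS };

/* Entries are keyed by enumerator; initializer order is irrelevant and
 * omitted slots (TRANSCODER_C here) are zero-initialized.
 */
static const unsigned int pipe_offsets[MAX_TRANSCODERS] = {
	[TRANSCODER_A]   = 0x70000,
	[TRANSCODER_B]   = 0x71000,
	[TRANSCODER_EDP] = 0x7f000,
};

int main(void)
{
	for (int t = TRANSCODER_A; t < MAX_TRANSCODERS; t++)
		printf("transcoder %d -> 0x%x\n", t, pipe_offsets[t]);
	return 0;
}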
@@ -68,8 +79,9 @@
 #define GEN2_FEATURES \
 	GEN(2), \
 	.num_pipes = 1, \
-	.has_overlay = 1, .overlay_needs_physical = 1, \
-	.has_gmch_display = 1, \
+	.display.has_overlay = 1, \
+	.display.overlay_needs_physical = 1, \
+	.display.has_gmch_display = 1, \
 	.hws_needs_physical = 1, \
 	.unfenced_needs_alignment = 1, \
 	.ring_mask = RENDER_RING, \
@@ -82,7 +94,8 @@
 static const struct intel_device_info intel_i830_info = {
 	GEN2_FEATURES,
 	PLATFORM(INTEL_I830),
-	.is_mobile = 1, .cursor_needs_physical = 1,
+	.is_mobile = 1,
+	.display.cursor_needs_physical = 1,
 	.num_pipes = 2, /* legal, last one wins */
 };
 
@@ -96,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
 	PLATFORM(INTEL_I85X),
 	.is_mobile = 1,
 	.num_pipes = 2, /* legal, last one wins */
-	.cursor_needs_physical = 1,
-	.has_fbc = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
@@ -108,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
 #define GEN3_FEATURES \
 	GEN(3), \
 	.num_pipes = 2, \
-	.has_gmch_display = 1, \
+	.display.has_gmch_display = 1, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -120,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915G),
 	.has_coherent_ggtt = false,
-	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -130,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I915GM),
 	.is_mobile = 1,
-	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.supports_tv = 1,
-	.has_fbc = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
+	.display.supports_tv = 1,
+	.display.has_fbc = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -141,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
 static const struct intel_device_info intel_i945g_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945G),
-	.has_hotplug = 1, .cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
+	.display.has_hotplug = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -151,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_I945GM),
 	.is_mobile = 1,
-	.has_hotplug = 1, .cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.supports_tv = 1,
-	.has_fbc = 1,
+	.display.has_hotplug = 1,
+	.display.cursor_needs_physical = 1,
+	.display.has_overlay = 1,
+	.display.overlay_needs_physical = 1,
+	.display.supports_tv = 1,
+	.display.has_fbc = 1,
 	.hws_needs_physical = 1,
 	.unfenced_needs_alignment = 1,
 };
@@ -162,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
 static const struct intel_device_info intel_g33_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_G33),
-	.has_hotplug = 1,
-	.has_overlay = 1,
+	.display.has_hotplug = 1,
+	.display.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
 	GEN3_FEATURES,
 	PLATFORM(INTEL_PINEVIEW),
 	.is_mobile = 1,
-	.has_hotplug = 1,
-	.has_overlay = 1,
+	.display.has_hotplug = 1,
+	.display.has_overlay = 1,
 };
 
 #define GEN4_FEATURES \
 	GEN(4), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
-	.has_gmch_display = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_gmch_display = 1, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -189,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
 static const struct intel_device_info intel_i965g_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965G),
-	.has_overlay = 1,
+	.display.has_overlay = 1,
 	.hws_needs_physical = 1,
 	.has_snoop = false,
 };
@@ -197,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
 static const struct intel_device_info intel_i965gm_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_I965GM),
-	.is_mobile = 1, .has_fbc = 1,
-	.has_overlay = 1,
-	.supports_tv = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
+	.display.has_overlay = 1,
+	.display.supports_tv = 1,
 	.hws_needs_physical = 1,
 	.has_snoop = false,
 };
@@ -213,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
 static const struct intel_device_info intel_gm45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_GM45),
-	.is_mobile = 1, .has_fbc = 1,
-	.supports_tv = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
+	.display.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 #define GEN5_FEATURES \
 	GEN(5), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
+	.display.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -239,20 +260,21 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
 	GEN5_FEATURES,
 	PLATFORM(INTEL_IRONLAKE),
-	.is_mobile = 1, .has_fbc = 1,
+	.is_mobile = 1,
+	.display.has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
 	GEN(6), \
 	.num_pipes = 2, \
-	.has_hotplug = 1, \
-	.has_fbc = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_ALIASING, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
@@ -290,15 +312,14 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 #define GEN7_FEATURES  \
 	GEN(7), \
 	.num_pipes = 3, \
-	.has_hotplug = 1, \
-	.has_fbc = 1, \
+	.display.has_hotplug = 1, \
+	.display.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_coherent_ggtt = true, \
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	IVB_CURSOR_OFFSETS
@@ -349,10 +370,9 @@ static const struct intel_device_info intel_valleyview_info = {
 	.num_pipes = 2,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
-	.has_gmch_display = 1,
-	.has_hotplug = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.display.has_gmch_display = 1,
+	.display.has_hotplug = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -365,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
 #define G75_FEATURES  \
 	GEN7_FEATURES, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
-	.has_ddi = 1, \
+	.display.has_ddi = 1, \
 	.has_fpga_dbg = 1, \
-	.has_psr = 1, \
-	.has_dp_mst = 1, \
+	.display.has_psr = 1, \
+	.display.has_dp_mst = 1, \
 	.has_rc6p = 0 /* RC6p removed by HSW */, \
 	.has_runtime_pm = 1
 
@@ -399,7 +419,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
 	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 		      I915_GTT_PAGE_SIZE_2M, \
 	.has_logical_ring_contexts = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_64bit_reloc = 1, \
 	.has_reset_engine = 1
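
 /*
  * Across this file the three independent PPGTT booleans collapse into a
  * single ordered level. A runnable sketch of why one enum comparison can
  * replace the separate flags (values mirror the INTEL_PPGTT_* names used
  * above; the helper name is illustrative, not the driver's):
  *
  *	#include <stdio.h>
  *
  *	enum intel_ppgtt_sketch {
  *		PPGTT_NONE = 0,
  *		PPGTT_ALIASING,
  *		PPGTT_FULL,
  *		PPGTT_FULL_4LVL,
  *	};
  *
  *	// "supports at least full PPGTT" is one ordered comparison
  *	static int has_full_ppgtt(enum intel_ppgtt_sketch ppgtt)
  *	{
  *		return ppgtt >= PPGTT_FULL;
  *	}
  *
  *	int main(void)
  *	{
  *		printf("%d %d\n", has_full_ppgtt(PPGTT_ALIASING),   // 0
  *		       has_full_ppgtt(PPGTT_FULL_4LVL));            // 1
  *		return 0;
  *	}
  */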
 
@@ -435,16 +455,15 @@ static const struct intel_device_info intel_cherryview_info = {
 	PLATFORM(INTEL_CHERRYVIEW),
 	GEN(8),
 	.num_pipes = 3,
-	.has_hotplug = 1,
+	.display.has_hotplug = 1,
 	.is_lp = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_64bit_reloc = 1,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
 	.has_logical_ring_contexts = 1,
-	.has_gmch_display = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.display.has_gmch_display = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_reset_engine = 1,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
@@ -465,13 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
 	GEN(9), \
 	GEN9_DEFAULT_PAGE_SIZES, \
 	.has_logical_ring_preemption = 1, \
-	.has_csr = 1, \
+	.display.has_csr = 1, \
 	.has_guc = 1, \
-	.has_ipc = 1, \
+	.display.has_ipc = 1, \
 	.ddb_size = 896
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
+	/* Display WA #0477 WaDisableIPC: skl */ \
+	.display.has_ipc = 0, \
 	PLATFORM(INTEL_SKYLAKE)
 
 static const struct intel_device_info intel_skylake_gt1_info = {
@@ -502,29 +523,27 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 #define GEN9_LP_FEATURES \
 	GEN(9), \
 	.is_lp = 1, \
-	.has_hotplug = 1, \
+	.display.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
 	.num_pipes = 3, \
 	.has_64bit_reloc = 1, \
-	.has_ddi = 1, \
+	.display.has_ddi = 1, \
 	.has_fpga_dbg = 1, \
-	.has_fbc = 1, \
-	.has_psr = 1, \
+	.display.has_fbc = 1, \
+	.display.has_psr = 1, \
 	.has_runtime_pm = 1, \
 	.has_pooled_eu = 0, \
-	.has_csr = 1, \
+	.display.has_csr = 1, \
 	.has_rc6 = 1, \
-	.has_dp_mst = 1, \
+	.display.has_dp_mst = 1, \
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_guc = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_reset_engine = 1, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
-	.has_ipc = 1, \
+	.display.has_ipc = 1, \
 	GEN9_DEFAULT_PAGE_SIZES, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	IVB_CURSOR_OFFSETS, \
@@ -598,6 +617,22 @@ static const struct intel_device_info intel_cannonlake_info = {
 
 #define GEN11_FEATURES \
 	GEN10_FEATURES, \
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET, \
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = PIPE_C_OFFSET, \
+		[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+		[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+		[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+		[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+		[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
 	.has_logical_ring_elsq = 1
@@ -663,7 +698,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
 	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-	INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+	INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +706,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+	INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_CNL_IDS(&intel_cannonlake_info),
 	INTEL_ICL_11_IDS(&intel_icelake_11_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96b..4529edf 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		/*
 		 * Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		oastatus1 = I915_READ(GEN7_OASTATUS1);
 	}
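
/*
 * Both overflow-restart paths above now pass the stream itself to the
 * enable/disable hooks. A compilable sketch of the signature change,
 * with struct contents trimmed to the point being made (the _sketch
 * names are not the driver's):
 *
 *	struct drm_i915_private;
 *
 *	struct i915_perf_stream_sketch {
 *		struct drm_i915_private *dev_priv;
 *		// plus the per-stream state (ctx, oa_config, ...) the old
 *		// hooks had to fetch back out of dev_priv->perf.oa
 *	};
 *
 *	struct i915_oa_ops_sketch {
 *		// before: void (*oa_enable)(struct drm_i915_private *);
 *		void (*oa_enable)(struct i915_perf_stream_sketch *stream);
 *		void (*oa_disable)(struct i915_perf_stream_sketch *stream);
 *		int (*enable_metric_set)(struct i915_perf_stream_sketch *stream);
 *	};
 */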
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto err_unpin;
 	}
 
-	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
 	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
 			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
 			 dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
-				 const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
+
 	/* PRM:
 	 *
 	 * OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
-				  const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
 	int ret;
 
 	/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 		   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
 }
 
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
 {
-	struct i915_gem_context *ctx =
-			dev_priv->perf.oa.exclusive_stream->ctx;
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct i915_gem_context *ctx = stream->ctx;
 	u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
 	bool periodic = dev_priv->perf.oa.periodic;
 	u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
 		   GEN7_OACONTROL_ENABLE);
 }
 
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
 	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
 
 	/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_enable(dev_priv);
+	dev_priv->perf.oa.ops.oa_enable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 			      HRTIMER_MODE_REL_PINNED);
 }
 
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN7_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
 		DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN8_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_disable(dev_priv);
+	dev_priv->perf.oa.ops.oa_disable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		return -EINVAL;
 	}
 
-	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+	if (!dev_priv->perf.oa.ops.enable_metric_set) {
 		DRM_DEBUG("OA unit not supported\n");
 		return -ENODEV;
 	}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	if (ret)
 		goto err_lock;
 
-	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
-						      stream->oa_config);
+	ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
 	if (ret) {
 		DRM_DEBUG("Unable to enable metric set\n");
 		goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		dev_priv->perf.oa.ops.is_valid_mux_reg =
 			hsw_is_valid_mux_addr;
 		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
-		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
 		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
 		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
 		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		 */
 		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
 
-		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
 		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
 		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
 		dev_priv->perf.oa.ops.read = gen8_oa_read;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502ee..6fc4b8e 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
 
 	slice_length = sizeof(sseu->slice_mask);
 	subslice_length = sseu->max_slices *
-		DIV_ROUND_UP(sseu->max_subslices,
-			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+		DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
 	eu_length = sseu->max_slices * sseu->max_subslices *
 		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
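
 /*
  * For the u8 subslice_mask elements the two formulas agree, since
  * sizeof(u8) * BITS_PER_BYTE == BITS_PER_BYTE; the new one just states
  * the intent directly: one mask byte per 8 subslices, per slice. A
  * quick standalone check with illustrative counts:
  *
  *	#include <stdio.h>
  *
  *	#define BITS_PER_BYTE		8
  *	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
  *
  *	int main(void)
  *	{
  *		int max_slices = 3, max_subslices = 8;
  *		int subslice_length = max_slices *
  *			DIV_ROUND_UP(max_subslices, BITS_PER_BYTE);
  *		printf("%d\n", subslice_length);	// 3 bytes
  *		return 0;
  *	}
  */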
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e31c27e..0a7d605 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /*
  * Named helper wrappers around _PICK_EVEN() and _PICK().
  */
-#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PIPE(pipe, a, b)		_PICK_EVEN(pipe, a, b)
+#define _PLANE(plane, a, b)		_PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b)		_PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b)		_PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b)			_PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b)		_MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b)	_MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b)		_MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b)		_MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b)		_MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...)			_PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c)	_MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg)		_MMIO(dev_priv->info.pipe_offsets[pipe] - \
+					      dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg)		_MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+					      dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg)		_MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+					      dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
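+/*
+ * As a worked instance of these helpers (display_mmio_offset taken as 0
+ * for the sketch): rebasing the transcoder-A-relative _HTOTAL_A onto the
+ * DSI 0 transcoder offset reproduces the DSI timing address added later
+ * in this patch.
+ *
+ *	#include <stdio.h>
+ *
+ *	#define TRANSCODER_A_OFFSET	0x60000u
+ *	#define TRANSCODER_DSI0_OFFSET	0x6b000u
+ *	#define _HTOTAL_A		0x60000u
+ *
+ *	// _MMIO_TRANS2 arithmetic with display_mmio_offset == 0
+ *	static unsigned int mmio_trans2(unsigned int trans_offset,
+ *					unsigned int reg)
+ *	{
+ *		return trans_offset - TRANSCODER_A_OFFSET + reg;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		// prints 0x6b000, i.e. _HTOTAL_DSI0
+ *		printf("%#x\n", mmio_trans2(TRANSCODER_DSI0_OFFSET, _HTOTAL_A));
+ *		return 0;
+ *	}
+ */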
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({					   \
@@ -1631,20 +1648,48 @@ enum i915_power_well_id {
 #define   PHY_RESERVED			(1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
 
+#define _PORT_CL1CM_DW9_A		0x162024
+#define _PORT_CL1CM_DW9_BC		0x6C024
+#define   IREF0RC_OFFSET_SHIFT		8
+#define   IREF0RC_OFFSET_MASK		(0xFF << IREF0RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW9(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
+
+#define _PORT_CL1CM_DW10_A		0x162028
+#define _PORT_CL1CM_DW10_BC		0x6C028
+#define   IREF1RC_OFFSET_SHIFT		8
+#define   IREF1RC_OFFSET_MASK		(0xFF << IREF1RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+
+#define _PORT_CL1CM_DW28_A		0x162070
+#define _PORT_CL1CM_DW28_BC		0x6C070
+#define   OCL1_POWER_DOWN_EN		(1 << 23)
+#define   DW28_OLDO_DYN_PWR_DOWN_EN	(1 << 22)
+#define   SUS_CLK_CONFIG		0x3
+#define BXT_PORT_CL1CM_DW28(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
+
+#define _PORT_CL1CM_DW30_A		0x162078
+#define _PORT_CL1CM_DW30_BC		0x6C078
+#define   OCL2_LDOFUSE_PWR_DIS		(1 << 6)
+#define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A			0x162000
+#define _ICL_COMBOPHY_B			0x6C000
+#define _ICL_COMBOPHY(port)		_PICK(port, _ICL_COMBOPHY_A, \
+					      _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port)	(_ICL_COMBOPHY(port) + \
+					 4 * (dw))
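+/*
+ * A standalone check that the factored form reproduces one of the
+ * hard-coded addresses removed below (_PICK over two ports reduced to a
+ * ternary for the sketch):
+ *
+ *	#include <stdio.h>
+ *
+ *	enum port { PORT_A, PORT_B };
+ *
+ *	static unsigned int icl_port_cl_dw(unsigned int dw, enum port port)
+ *	{
+ *		unsigned int base = (port == PORT_A) ? 0x162000u : 0x6C000u;
+ *		return base + 4 * dw;	// dwords are 4 bytes apart
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		// prints 0x6c028: the address _ICL_PORT_CL_DW10_B named
+ *		printf("%#x\n", icl_port_cl_dw(10, PORT_B));
+ *		return 0;
+ *	}
+ */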
+
 #define CNL_PORT_CL1CM_DW5		_MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port)		_MMIO(_ICL_PORT_CL_DW(5, port))
 #define   CL_POWER_DOWN_ENABLE		(1 << 4)
 #define   SUS_CLOCK_CONFIG		(3 << 0)
 
-#define _ICL_PORT_CL_DW5_A	0x162014
-#define _ICL_PORT_CL_DW5_B	0x6C014
-#define ICL_PORT_CL_DW5(port)	_MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
-						 _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A		0x162028
-#define _ICL_PORT_CL_DW10_B		0x6c028
-#define ICL_PORT_CL_DW10(port)		_MMIO_PORT(port,	\
-						   _CNL_PORT_CL_DW10_A, \
-						   _ICL_PORT_CL_DW10_B)
+#define ICL_PORT_CL_DW10(port)		_MMIO(_ICL_PORT_CL_DW(10, port))
 #define  PG_SEQ_DELAY_OVERRIDE_MASK	(3 << 25)
 #define  PG_SEQ_DELAY_OVERRIDE_SHIFT	25
 #define  PG_SEQ_DELAY_OVERRIDE_ENABLE	(1 << 24)
@@ -1660,37 +1705,41 @@ enum i915_power_well_id {
 #define  PWR_DOWN_LN_MASK		(0xf << 4)
 #define  PWR_DOWN_LN_SHIFT		4
 
-#define _PORT_CL1CM_DW9_A		0x162024
-#define _PORT_CL1CM_DW9_BC		0x6C024
-#define   IREF0RC_OFFSET_SHIFT		8
-#define   IREF0RC_OFFSET_MASK		(0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
-
-#define _PORT_CL1CM_DW10_A		0x162028
-#define _PORT_CL1CM_DW10_BC		0x6C028
-#define   IREF1RC_OFFSET_SHIFT		8
-#define   IREF1RC_OFFSET_MASK		(0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-
-#define _ICL_PORT_CL_DW12_A		0x162030
-#define _ICL_PORT_CL_DW12_B		0x6C030
+#define ICL_PORT_CL_DW12(port)		_MMIO(_ICL_PORT_CL_DW(12, port))
 #define   ICL_LANE_ENABLE_AUX		(1 << 0)
-#define ICL_PORT_CL_DW12(port)		_MMIO_PORT((port),		\
-						   _ICL_PORT_CL_DW12_A, \
-						   _ICL_PORT_CL_DW12_B)
 
-#define _PORT_CL1CM_DW28_A		0x162070
-#define _PORT_CL1CM_DW28_BC		0x6C070
-#define   OCL1_POWER_DOWN_EN		(1 << 23)
-#define   DW28_OLDO_DYN_PWR_DOWN_EN	(1 << 22)
-#define   SUS_CLK_CONFIG		0x3
-#define BXT_PORT_CL1CM_DW28(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP			0x100
+#define _ICL_PORT_COMP_DW(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_COMP + 4 * (dw))
 
-#define _PORT_CL1CM_DW30_A		0x162078
-#define _PORT_CL1CM_DW30_BC		0x6C078
-#define   OCL2_LDOFUSE_PWR_DIS		(1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+#define CNL_PORT_COMP_DW0		_MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port)		_MMIO(_ICL_PORT_COMP_DW(0, port))
+#define   COMP_INIT			(1 << 31)
 
+#define CNL_PORT_COMP_DW1		_MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port)		_MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3		_MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port)		_MMIO(_ICL_PORT_COMP_DW(3, port))
+#define   PROCESS_INFO_DOT_0		(0 << 26)
+#define   PROCESS_INFO_DOT_1		(1 << 26)
+#define   PROCESS_INFO_DOT_4		(2 << 26)
+#define   PROCESS_INFO_MASK		(7 << 26)
+#define   PROCESS_INFO_SHIFT		26
+#define   VOLTAGE_INFO_0_85V		(0 << 24)
+#define   VOLTAGE_INFO_0_95V		(1 << 24)
+#define   VOLTAGE_INFO_1_05V		(2 << 24)
+#define   VOLTAGE_INFO_MASK		(3 << 24)
+#define   VOLTAGE_INFO_SHIFT		24
+
+#define CNL_PORT_COMP_DW9		_MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port)		_MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10		_MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port)	_MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
 #define _CNL_PORT_PCS_DW1_GRP_AE	0x162304
 #define _CNL_PORT_PCS_DW1_GRP_B		0x162384
 #define _CNL_PORT_PCS_DW1_GRP_C		0x162B04
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
 						    _CNL_PORT_PCS_DW1_GRP_D, \
 						    _CNL_PORT_PCS_DW1_GRP_AE, \
 						    _CNL_PORT_PCS_DW1_GRP_F))
-
 #define CNL_PORT_PCS_DW1_LN0(port)	_MMIO(_PICK(port, \
 						    _CNL_PORT_PCS_DW1_LN0_AE, \
 						    _CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
 						    _CNL_PORT_PCS_DW1_LN0_AE, \
 						    _CNL_PORT_PCS_DW1_LN0_F))
 
-#define _ICL_PORT_PCS_DW1_GRP_A		0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B		0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A		0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B		0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A		0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B		0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port)	_MMIO_PORT(port,\
-						   _ICL_PORT_PCS_DW1_GRP_A, \
-						   _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_PCS_DW1_LN0_A, \
-						   _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_PCS_DW1_AUX_A, \
-						   _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX		0x300
+#define _ICL_PORT_PCS_GRP		0x600
+#define _ICL_PORT_PCS_LN(ln)		(0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+					  _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port)	_MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port)	_MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port)	_MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
 #define   COMMON_KEEPER_EN		(1 << 26)
 
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
 #define _CNL_PORT_TX_AE_GRP_OFFSET		0x162340
 #define _CNL_PORT_TX_B_GRP_OFFSET		0x1623C0
 #define _CNL_PORT_TX_C_GRP_OFFSET		0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
 					       _CNL_PORT_TX_F_LN0_OFFSET) + \
 					       4 * (dw))
 
-#define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A		0x162688
-#define _ICL_PORT_TX_DW2_GRP_B		0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A		0x162888
-#define _ICL_PORT_TX_DW2_LN0_B		0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A		0x162388
-#define _ICL_PORT_TX_DW2_AUX_B		0x6c388
-#define ICL_PORT_TX_DW2_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_GRP_A, \
-						   _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_LN0_A, \
-						   _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_AUX_A, \
-						   _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX		0x380
+#define _ICL_PORT_TX_GRP		0x680
+#define _ICL_PORT_TX_LN(ln)		(0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+					  _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
 #define   SWING_SEL_UPPER(x)		(((x) >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK		(1 << 15)
 #define   SWING_SEL_LOWER(x)		(((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
 					   ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
 						    _CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A		0x162690
-#define _ICL_PORT_TX_DW4_GRP_B		0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A		0x162890
-#define _ICL_PORT_TX_DW4_LN1_A		0x162990
-#define _ICL_PORT_TX_DW4_LN0_B		0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A		0x162390
-#define _ICL_PORT_TX_DW4_AUX_B		0x6c390
-#define ICL_PORT_TX_DW4_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW4_GRP_A, \
-						   _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_PORT(port, \
-						   _ICL_PORT_TX_DW4_LN0_A, \
-						   _ICL_PORT_TX_DW4_LN0_B) + \
-					     ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
-						      _ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW4_AUX_A, \
-						   _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
 #define   LOADGEN_SELECT		(1 << 31)
 #define   POST_CURSOR_1(x)		((x) << 12)
 #define   POST_CURSOR_1_MASK		(0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
 #define   CURSOR_COEFF(x)		((x) << 0)
 #define   CURSOR_COEFF_MASK		(0x3F << 0)
 
-#define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A		0x162694
-#define _ICL_PORT_TX_DW5_GRP_B		0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A		0x162894
-#define _ICL_PORT_TX_DW5_LN0_B		0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A		0x162394
-#define _ICL_PORT_TX_DW5_AUX_B		0x6c394
-#define ICL_PORT_TX_DW5_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_GRP_A, \
-						   _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_LN0_A, \
-						   _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_AUX_A, \
-						   _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
 #define   TX_TRAINING_EN		(1 << 31)
 #define   TAP2_DISABLE			(1 << 30)
 #define   TAP3_DISABLE			(1 << 29)
@@ -2054,47 +2072,10 @@ enum i915_power_well_id {
 #define BXT_PORT_CL2CM_DW6(phy)		_BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
 #define   DW6_OLDO_DYN_PWR_DOWN_EN	(1 << 28)
 
-#define CNL_PORT_COMP_DW0		_MMIO(0x162100)
-#define   COMP_INIT			(1 << 31)
-#define CNL_PORT_COMP_DW1		_MMIO(0x162104)
-#define CNL_PORT_COMP_DW3		_MMIO(0x16210c)
-#define   PROCESS_INFO_DOT_0		(0 << 26)
-#define   PROCESS_INFO_DOT_1		(1 << 26)
-#define   PROCESS_INFO_DOT_4		(2 << 26)
-#define   PROCESS_INFO_MASK		(7 << 26)
-#define   PROCESS_INFO_SHIFT		26
-#define   VOLTAGE_INFO_0_85V		(0 << 24)
-#define   VOLTAGE_INFO_0_95V		(1 << 24)
-#define   VOLTAGE_INFO_1_05V		(2 << 24)
-#define   VOLTAGE_INFO_MASK		(3 << 24)
-#define   VOLTAGE_INFO_SHIFT		24
-#define CNL_PORT_COMP_DW9		_MMIO(0x162124)
-#define CNL_PORT_COMP_DW10		_MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A		0x162100
-#define _ICL_PORT_COMP_DW0_B		0x6C100
-#define ICL_PORT_COMP_DW0(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
-							 _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A		0x162104
-#define _ICL_PORT_COMP_DW1_B		0x6C104
-#define ICL_PORT_COMP_DW1(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
-							 _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A		0x16210C
-#define _ICL_PORT_COMP_DW3_B		0x6C10C
-#define ICL_PORT_COMP_DW3(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
-							 _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A		0x162124
-#define _ICL_PORT_COMP_DW9_B		0x6C124
-#define ICL_PORT_COMP_DW9(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
-							 _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A		0x162128
-#define _ICL_PORT_COMP_DW10_B		0x6C128
-#define ICL_PORT_COMP_DW10(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_COMP_DW10_A, \
-						   _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE			0x163000
 
 /* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1		_MMIO(0x1638C0)
+#define PORT_TX_DFLEXDPMLE1		_MMIO(FIA1_BASE + 0x008C0)
 #define   DFLEXDPMLE1_DPMLETC_MASK(tc_port)	(0xf << (4 * (tc_port)))
 #define   DFLEXDPMLE1_DPMLETC_ML0(tc_port)	(1 << (4 * (tc_port)))
 #define   DFLEXDPMLE1_DPMLETC_ML1_0(tc_port)	(3 << (4 * (tc_port)))
@@ -2417,6 +2398,7 @@ enum i915_power_well_id {
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE	(1 << 7)
 
 #define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_L3_COH_PIPE			(1 << 31)
@@ -2577,6 +2559,7 @@ enum i915_power_well_id {
 /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
 #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
 #define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define   GEN11_ENABLE_32_PLANE_MODE (1 << 7)
 
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK		_MMIO(0x20F0)
@@ -3479,11 +3462,13 @@ enum i915_power_well_id {
 /*
  * Palette regs
  */
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] +	\
-			      dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A		0xa000
+#define _PALETTE_B		0xa800
+#define _CHV_PALETTE_C		0xc000
+#define PALETTE(pipe, i)	_MMIO(dev_priv->info.display_mmio_offset + \
+				      _PICK((pipe), _PALETTE_A,		\
+					    _PALETTE_B, _CHV_PALETTE_C) + \
+				      (i) * 4)
 
 /* MCH MMIO space */
 
@@ -4065,15 +4050,27 @@ enum {
 #define _VSYNCSHIFT_B	0x61028
 #define _PIPE_MULT_B	0x6102c
 
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0		0x6b000
+#define _HSYNC_DSI0		0x6b008
+#define _VTOTAL_DSI0		0x6b00c
+#define _VSYNC_DSI0		0x6b014
+#define _VSYNCSHIFT_DSI0	0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1		0x6b800
+#define _HSYNC_DSI1		0x6b808
+#define _VTOTAL_DSI1		0x6b80c
+#define _VSYNC_DSI1		0x6b814
+#define _VSYNCSHIFT_DSI1	0x6b828
+
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
 #define TRANSCODER_C_OFFSET 0x62000
 #define CHV_TRANSCODER_C_OFFSET 0x63000
 #define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-	dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET	0x6b000
+#define TRANSCODER_DSI1_OFFSET	0x6b800
 
 #define HTOTAL(trans)		_MMIO_TRANS2(trans, _HTOTAL_A)
 #define HBLANK(trans)		_MMIO_TRANS2(trans, _HBLANK_A)
@@ -4153,9 +4150,13 @@ enum {
 /* Bspec claims those aren't shifted but stay at 0x64800 */
 #define EDP_PSR_IMR				_MMIO(0x64834)
 #define EDP_PSR_IIR				_MMIO(0x64838)
-#define   EDP_PSR_ERROR(trans)			(1 << (((trans) * 8 + 10) & 31))
-#define   EDP_PSR_POST_EXIT(trans)		(1 << (((trans) * 8 + 9) & 31))
-#define   EDP_PSR_PRE_ENTRY(trans)		(1 << (((trans) * 8 + 8) & 31))
+#define   EDP_PSR_ERROR(shift)			(1 << ((shift) + 2))
+#define   EDP_PSR_POST_EXIT(shift)		(1 << ((shift) + 1))
+#define   EDP_PSR_PRE_ENTRY(shift)		(1 << (shift))
+#define   EDP_PSR_TRANSCODER_C_SHIFT		24
+#define   EDP_PSR_TRANSCODER_B_SHIFT		16
+#define   EDP_PSR_TRANSCODER_A_SHIFT		8
+#define   EDP_PSR_TRANSCODER_EDP_SHIFT		0
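+/*
+ * Worked instance of the new form: the old macros derived the bit
+ * position from the transcoder enum and needed the `& 31` wrap to fold
+ * the EDP transcoder back to the low bits; an explicit per-transcoder
+ * shift makes that direct.
+ *
+ *	#include <stdio.h>
+ *
+ *	#define EDP_PSR_ERROR(shift)		(1u << ((shift) + 2))
+ *	#define EDP_PSR_TRANSCODER_B_SHIFT	16
+ *
+ *	int main(void)
+ *	{
+ *		// transcoder B's error flag: bit 18, i.e. 0x00040000
+ *		printf("%#010x\n", EDP_PSR_ERROR(EDP_PSR_TRANSCODER_B_SHIFT));
+ *		return 0;
+ *	}
+ */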
 
 #define EDP_PSR_AUX_CTL				_MMIO(dev_priv->psr_mmio_base + 0x10)
 #define   EDP_PSR_AUX_CTL_TIME_OUT_MASK		(3 << 26)
@@ -4199,7 +4200,7 @@ enum {
 #define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
 #define   EDP_PSR_DEBUG_MASK_HPD               (1 << 25)
-#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16) /* Reserved in ICL+ */
 #define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
 
 #define EDP_PSR2_CTL			_MMIO(0x6f900)
@@ -4236,7 +4237,7 @@ enum {
 #define  PSR_EVENT_FRONT_BUFFER_MODIFY		(1 << 9)
 #define  PSR_EVENT_WD_TIMER_EXPIRE		(1 << 8)
 #define  PSR_EVENT_PIPE_REGISTERS_UPDATE	(1 << 6)
-#define  PSR_EVENT_REGISTER_UPDATE		(1 << 5)
+#define  PSR_EVENT_REGISTER_UPDATE		(1 << 5) /* Reserved in ICL+ */
 #define  PSR_EVENT_HDCP_ENABLE			(1 << 4)
 #define  PSR_EVENT_KVMR_SESSION_ENABLE		(1 << 3)
 #define  PSR_EVENT_VBI_ENABLE			(1 << 2)
@@ -4569,6 +4570,7 @@ enum {
  * of the infoframe structure specified by CEA-861. */
 #define   VIDEO_DIP_DATA_SIZE	32
 #define   VIDEO_DIP_VSC_DATA_SIZE	36
+#define   VIDEO_DIP_PPS_DATA_SIZE	132
 #define VIDEO_DIP_CTL		_MMIO(0x61170)
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE		(1 << 31)
@@ -4588,6 +4590,15 @@ enum {
 #define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
 #define   VIDEO_DIP_FREQ_MASK		(3 << 16)
 /* HSW and later: */
+#define   DRM_DIP_ENABLE		(1 << 28)
+#define   PSR_VSC_BIT_7_SET		(1 << 27)
+#define   VSC_SELECT_MASK		(0x3 << 25)
+#define   VSC_SELECT_SHIFT		25
+#define   VSC_DIP_HW_HEA_DATA		(0 << 25)
+#define   VSC_DIP_HW_HEA_SW_DATA	(1 << 25)
+#define   VSC_DIP_HW_DATA_SW_HEA	(2 << 25)
+#define   VSC_DIP_SW_HEA_DATA		(3 << 25)
+#define   VDIP_ENABLE_PPS		(1 << 24)
 #define   VIDEO_DIP_ENABLE_VSC_HSW	(1 << 20)
 #define   VIDEO_DIP_ENABLE_GCP_HSW	(1 << 16)
 #define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
@@ -4595,16 +4606,6 @@ enum {
 #define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4)
 #define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
 
-#define  DRM_DIP_ENABLE			(1 << 28)
-#define  PSR_VSC_BIT_7_SET		(1 << 27)
-#define  VSC_SELECT_MASK		(0x3 << 25)
-#define  VSC_SELECT_SHIFT		25
-#define  VSC_DIP_HW_HEA_DATA		(0 << 25)
-#define  VSC_DIP_HW_HEA_SW_DATA		(1 << 25)
-#define  VSC_DIP_HW_DATA_SW_HEA		(2 << 25)
-#define  VSC_DIP_SW_HEA_DATA		(3 << 25)
-#define  VDIP_ENABLE_PPS		(1 << 24)
-
 /* Panel power sequencing */
 #define PPS_BASE			0x61200
 #define VLV_PPS_BASE			(VLV_DISPLAY_BASE + PPS_BASE)
@@ -4617,6 +4618,17 @@ enum {
 #define _PP_STATUS			0x61200
 #define PP_STATUS(pps_idx)		_MMIO_PPS(pps_idx, _PP_STATUS)
 #define   PP_ON				(1 << 31)
+
+#define _PP_CONTROL_1			0xc7204
+#define _PP_CONTROL_2			0xc7304
+#define ICP_PP_CONTROL(x)		_MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+					      _PP_CONTROL_2)
+#define  POWER_CYCLE_DELAY_MASK	(0x1f << 4)
+#define  POWER_CYCLE_DELAY_SHIFT	4
+#define  VDD_OVERRIDE_FORCE		(1 << 3)
+#define  BACKLIGHT_ENABLE		(1 << 2)
+#define  PWR_DOWN_ON_RESET		(1 << 1)
+#define  PWR_STATE_TARGET		(1 << 0)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -5640,9 +5652,9 @@ enum {
  */
 #define PIPE_EDP_OFFSET	0x7f000
 
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
-	dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET	0x7b000
+#define PIPE_DSI1_OFFSET	0x7b800
 
 #define PIPECONF(pipe)		_MMIO_PIPE2(pipe, _PIPEACONF)
 #define PIPEDSL(pipe)		_MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6091,10 +6103,6 @@ enum {
 #define _CURBBASE_IVB		0x71084
 #define _CURBPOS_IVB		0x71088
 
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-	dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
-
 #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
 #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
 #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6228,6 +6236,10 @@ enum {
 #define _DSPBOFFSET		(dev_priv->info.display_mmio_offset + 0x711A4)
 #define _DSPBSURFLIVE		(dev_priv->info.display_mmio_offset + 0x711AC)
 
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF		0x7b008
+#define _PIPEDSI1CONF		0x7b808
+
 /* Sprite A control */
 #define _DVSACNTR		0x72180
 #define   DVS_ENABLE		(1 << 31)
@@ -6515,6 +6527,7 @@ enum {
 #define   PLANE_CTL_KEY_ENABLE_DESTINATION	(2 << 21)
 #define   PLANE_CTL_ORDER_BGRX			(0 << 20)
 #define   PLANE_CTL_ORDER_RGBX			(1 << 20)
+#define   PLANE_CTL_YUV420_Y_PLANE		(1 << 19)
 #define   PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709	(1 << 18)
 #define   PLANE_CTL_YUV422_ORDER_MASK		(0x3 << 16)
 #define   PLANE_CTL_YUV422_YUYV			(0 << 16)
@@ -6558,17 +6571,33 @@ enum {
 #define _PLANE_KEYVAL_2_A			0x70294
 #define _PLANE_KEYMSK_1_A			0x70198
 #define _PLANE_KEYMSK_2_A			0x70298
+#define  PLANE_KEYMSK_ALPHA_ENABLE		(1 << 31)
 #define _PLANE_KEYMAX_1_A			0x701a0
 #define _PLANE_KEYMAX_2_A			0x702a0
+#define  PLANE_KEYMAX_ALPHA(a)			((a) << 24)
 #define _PLANE_AUX_DIST_1_A			0x701c0
 #define _PLANE_AUX_DIST_2_A			0x702c0
 #define _PLANE_AUX_OFFSET_1_A			0x701c4
 #define _PLANE_AUX_OFFSET_2_A			0x702c4
+#define _PLANE_CUS_CTL_1_A			0x701c8
+#define _PLANE_CUS_CTL_2_A			0x702c8
+#define  PLANE_CUS_ENABLE			(1 << 31)
+#define  PLANE_CUS_PLANE_6			(0 << 30)
+#define  PLANE_CUS_PLANE_7			(1 << 30)
+#define  PLANE_CUS_HPHASE_SIGN_NEGATIVE		(1 << 19)
+#define  PLANE_CUS_HPHASE_0			(0 << 16)
+#define  PLANE_CUS_HPHASE_0_25			(1 << 16)
+#define  PLANE_CUS_HPHASE_0_5			(2 << 16)
+#define  PLANE_CUS_VPHASE_SIGN_NEGATIVE		(1 << 15)
+#define  PLANE_CUS_VPHASE_0			(0 << 12)
+#define  PLANE_CUS_VPHASE_0_25			(1 << 12)
+#define  PLANE_CUS_VPHASE_0_5			(2 << 12)
 #define _PLANE_COLOR_CTL_1_A			0x701CC /* GLK+ */
 #define _PLANE_COLOR_CTL_2_A			0x702CC /* GLK+ */
 #define _PLANE_COLOR_CTL_3_A			0x703CC /* GLK+ */
 #define   PLANE_COLOR_PIPE_GAMMA_ENABLE		(1 << 30) /* Pre-ICL */
 #define   PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE	(1 << 28)
+#define   PLANE_COLOR_INPUT_CSC_ENABLE		(1 << 20) /* ICL+ */
 #define   PLANE_COLOR_PIPE_CSC_ENABLE		(1 << 23) /* Pre-ICL */
 #define   PLANE_COLOR_CSC_MODE_BYPASS			(0 << 17)
 #define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709		(1 << 17)
@@ -6585,6 +6614,55 @@ enum {
 #define _PLANE_NV12_BUF_CFG_1_A		0x70278
 #define _PLANE_NV12_BUF_CFG_2_A		0x70378
 
+/* Input CSC Register Definitions */
+#define _PLANE_INPUT_CSC_RY_GY_1_A	0x701E0
+#define _PLANE_INPUT_CSC_RY_GY_2_A	0x702E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1_B	0x711E0
+#define _PLANE_INPUT_CSC_RY_GY_2_B	0x712E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
+	     _PLANE_INPUT_CSC_RY_GY_1_B)
+#define _PLANE_INPUT_CSC_RY_GY_2(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+	     _PLANE_INPUT_CSC_RY_GY_2_B)
+
+#define PLANE_INPUT_CSC_COEFF(pipe, plane, index)	\
+	_MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) +  (index) * 4, \
+		    _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_A		0x701F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_A		0x702F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_B		0x711F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_B		0x712F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
+	     _PLANE_INPUT_CSC_PREOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
+	     _PLANE_INPUT_CSC_PREOFF_HI_2_B)
+#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index)	\
+	_MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
+		    _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A		0x70204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A		0x70304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B		0x71204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B		0x71304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
+	     _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe)	\
+	_PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
+	     _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index)	\
+	_MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
+		    _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
 
 #define _PLANE_CTL_1_B				0x71180
 #define _PLANE_CTL_2_B				0x71280
@@ -6701,6 +6779,15 @@ enum {
 #define PLANE_AUX_OFFSET(pipe, plane)   \
 	_MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
 
+#define _PLANE_CUS_CTL_1_B		0x711c8
+#define _PLANE_CUS_CTL_2_B		0x712c8
+#define _PLANE_CUS_CTL_1(pipe)       \
+		_PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
+#define _PLANE_CUS_CTL_2(pipe)       \
+		_PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
+#define PLANE_CUS_CTL(pipe, plane)   \
+	_MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
+
 #define _PLANE_COLOR_CTL_1_B			0x711CC
 #define _PLANE_COLOR_CTL_2_B			0x712CC
 #define _PLANE_COLOR_CTL_3_B			0x713CC
@@ -6854,11 +6941,12 @@ enum {
 #define _PS_2B_CTRL      0x68A80
 #define _PS_1C_CTRL      0x69180
 #define PS_SCALER_EN        (1 << 31)
-#define PS_SCALER_MODE_MASK (3 << 28)
-#define PS_SCALER_MODE_DYN  (0 << 28)
-#define PS_SCALER_MODE_HQ  (1 << 28)
+#define SKL_PS_SCALER_MODE_MASK (3 << 28)
+#define SKL_PS_SCALER_MODE_DYN  (0 << 28)
+#define SKL_PS_SCALER_MODE_HQ  (1 << 28)
 #define SKL_PS_SCALER_MODE_NV12 (2 << 28)
 #define PS_SCALER_MODE_PLANAR (1 << 29)
+#define PS_SCALER_MODE_NORMAL (0 << 29)
 #define PS_PLANE_SEL_MASK  (7 << 25)
 #define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
 #define PS_FILTER_MASK         (3 << 23)
@@ -6875,6 +6963,8 @@ enum {
 #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
 #define PS_VADAPT_MODE_MOD_ADAPT   (1 << 5)
 #define PS_VADAPT_MODE_MOST_ADAPT  (3 << 5)
+#define PS_PLANE_Y_SEL_MASK  (7 << 5)
+#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
 
 #define _PS_PWR_GATE_1A     0x68160
 #define _PS_PWR_GATE_2A     0x68260
@@ -7321,9 +7411,10 @@ enum {
 #define  BDW_DPRS_MASK_VBLANK_SRD	(1 << 0)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
-#define CHICKEN_TRANS_A         0x420c0
-#define CHICKEN_TRANS_B         0x420c4
-#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define CHICKEN_TRANS_A		_MMIO(0x420c0)
+#define CHICKEN_TRANS_B		_MMIO(0x420c4)
+#define CHICKEN_TRANS_C		_MMIO(0x420c8)
+#define CHICKEN_TRANS_EDP	_MMIO(0x420cc)
 #define  VSC_DATA_SEL_SOFTWARE_CONTROL	(1 << 25) /* GLK and CNL+ */
 #define  DDI_TRAINING_OVERRIDE_ENABLE	(1 << 19)
 #define  DDI_TRAINING_OVERRIDE_VALUE	(1 << 18)
@@ -7413,6 +7504,10 @@ enum {
 #define GEN9_SLICE_COMMON_ECO_CHICKEN1		_MMIO(0x731c)
 #define   GEN11_STATE_CACHE_REDIRECT_TO_CS	(1 << 11)
 
+#define GEN7_SARCHKMD				_MMIO(0xB000)
+#define GEN7_DISABLE_DEMAND_PREFETCH		(1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH           (1 << 30)
+
 #define GEN7_L3SQCREG1				_MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
 
@@ -7667,6 +7762,7 @@ enum {
 #define   ICP_DDIB_HPD_LONG_DETECT		(2 << 4)
 #define   ICP_DDIB_HPD_SHORT_LONG_DETECT	(3 << 4)
 #define   ICP_DDIA_HPD_ENABLE			(1 << 3)
+#define   ICP_DDIA_HPD_OP_DRIVE_1		(1 << 2)
 #define   ICP_DDIA_HPD_STATUS_MASK		(3 << 0)
 #define   ICP_DDIA_HPD_NO_DETECT		(0 << 0)
 #define   ICP_DDIA_HPD_SHORT_DETECT		(1 << 0)
@@ -7828,8 +7924,7 @@ enum {
 #define  CNP_RAWCLK_DIV_MASK	(0x3ff << 16)
 #define  CNP_RAWCLK_DIV(div)	((div) << 16)
 #define  CNP_RAWCLK_FRAC_MASK	(0xf << 26)
-#define  CNP_RAWCLK_FRAC(frac)	((frac) << 26)
-#define  ICP_RAWCLK_DEN(den)	((den) << 26)
+#define  CNP_RAWCLK_DEN(den)	((den) << 26)
 #define  ICP_RAWCLK_NUM(num)	((num) << 11)
 
 #define PCH_DPLL_TMR_CFG        _MMIO(0xc6208)
@@ -8629,8 +8724,7 @@ enum {
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC	(1 << 9)
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC	(1 << 7)
 
-#define GAMW_ECO_DEV_RW_IA_REG			_MMIO(0x4080)
-#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE	(1 << 7)
+#define GEN10_SAMPLER_MODE		_MMIO(0xE18C)
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8931,6 +9025,15 @@ enum skl_power_gate {
 #define   CNL_AUX_ANAOVRD1_ENABLE	(1 << 16)
 #define   CNL_AUX_ANAOVRD1_LDO_BYPASS	(1 << 23)
 
+#define _ICL_AUX_REG_IDX(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define _ICL_AUX_ANAOVRD1_A		0x162398
+#define _ICL_AUX_ANAOVRD1_B		0x6C398
+#define ICL_AUX_ANAOVRD1(pw_idx)	_MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
+						    _ICL_AUX_ANAOVRD1_A, \
+						    _ICL_AUX_ANAOVRD1_B))
+#define   ICL_AUX_ANAOVRD1_LDO_BYPASS	(1 << 7)
+#define   ICL_AUX_ANAOVRD1_ENABLE	(1 << 0)
+
 /* HDCP Key Registers */
 #define HDCP_KEY_CONF			_MMIO(0x66c00)
 #define  HDCP_AKSV_SEND_TRIGGER		BIT(31)
@@ -9013,11 +9116,45 @@ enum skl_power_gate {
 #define  HDCP_STATUS_CIPHER		BIT(16)
 #define  HDCP_STATUS_FRAME_CNT(x)	(((x) >> 8) & 0xff)
 
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE		0x66800
+#define _PORTB_HDCP2_BASE		0x66500
+#define _PORTC_HDCP2_BASE		0x66600
+#define _PORTD_HDCP2_BASE		0x66700
+#define _PORTE_HDCP2_BASE		0x66A00
+#define _PORTF_HDCP2_BASE		0x66900
+#define _PORT_HDCP2_BASE(port, x)	_MMIO(_PICK((port), \
+					  _PORTA_HDCP2_BASE, \
+					  _PORTB_HDCP2_BASE, \
+					  _PORTC_HDCP2_BASE, \
+					  _PORTD_HDCP2_BASE, \
+					  _PORTE_HDCP2_BASE, \
+					  _PORTF_HDCP2_BASE) + (x))
+
+#define HDCP2_AUTH_DDI(port)		_PORT_HDCP2_BASE(port, 0x98)
+#define   AUTH_LINK_AUTHENTICATED	BIT(31)
+#define   AUTH_LINK_TYPE		BIT(30)
+#define   AUTH_FORCE_CLR_INPUTCTR	BIT(19)
+#define   AUTH_CLR_KEYS			BIT(18)
+
+#define HDCP2_CTL_DDI(port)		_PORT_HDCP2_BASE(port, 0xB0)
+#define   CTL_LINK_ENCRYPTION_REQ	BIT(31)
+
+#define HDCP2_STATUS_DDI(port)		_PORT_HDCP2_BASE(port, 0xB4)
+#define   STREAM_ENCRYPTION_STATUS_A	BIT(31)
+#define   STREAM_ENCRYPTION_STATUS_B	BIT(30)
+#define   STREAM_ENCRYPTION_STATUS_C	BIT(29)
+#define   LINK_TYPE_STATUS		BIT(22)
+#define   LINK_AUTH_STATUS		BIT(21)
+#define   LINK_ENCRYPTION_STATUS	BIT(20)
+
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A		0x60400
 #define _TRANS_DDI_FUNC_CTL_B		0x61400
 #define _TRANS_DDI_FUNC_CTL_C		0x62400
 #define _TRANS_DDI_FUNC_CTL_EDP		0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0	0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1	0x6bc00
 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
 #define  TRANS_DDI_FUNC_ENABLE		(1 << 31)
@@ -9055,11 +9192,25 @@ enum skl_power_gate {
 					| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
 					| TRANS_DDI_HDMI_SCRAMBLING)
 
+#define _TRANS_DDI_FUNC_CTL2_A		0x60404
+#define _TRANS_DDI_FUNC_CTL2_B		0x61404
+#define _TRANS_DDI_FUNC_CTL2_C		0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP	0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0	0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1	0x6bc04
+#define TRANS_DDI_FUNC_CTL2(tran)	_MMIO_TRANS2(tran, \
+						     _TRANS_DDI_FUNC_CTL2_A)
+#define  PORT_SYNC_MODE_ENABLE			(1 << 4)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) << 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7 << 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT	0
+
 /* DisplayPort Transport Control */
 #define _DP_TP_CTL_A			0x64040
 #define _DP_TP_CTL_B			0x64140
 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
 #define  DP_TP_CTL_ENABLE			(1 << 31)
+#define  DP_TP_CTL_FEC_ENABLE			(1 << 30)
 #define  DP_TP_CTL_MODE_SST			(0 << 27)
 #define  DP_TP_CTL_MODE_MST			(1 << 27)
 #define  DP_TP_CTL_FORCE_ACT			(1 << 25)
@@ -9078,6 +9229,7 @@ enum skl_power_gate {
 #define _DP_TP_STATUS_A			0x64044
 #define _DP_TP_STATUS_B			0x64144
 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define  DP_TP_STATUS_FEC_ENABLE_LIVE		(1 << 28)
 #define  DP_TP_STATUS_IDLE_DONE			(1 << 25)
 #define  DP_TP_STATUS_ACT_SENT			(1 << 24)
 #define  DP_TP_STATUS_MODE_STATUS_MST		(1 << 23)
@@ -9226,6 +9378,8 @@ enum skl_power_gate {
 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
 #define  TRANS_MSA_SYNC_CLK		(1 << 0)
+#define  TRANS_MSA_SAMPLING_444		(2 << 1)
+#define  TRANS_MSA_CLRSP_YCBCR		(2 << 3)
 #define  TRANS_MSA_6_BPC		(0 << 5)
 #define  TRANS_MSA_8_BPC		(1 << 5)
 #define  TRANS_MSA_10_BPC		(2 << 5)
@@ -9793,6 +9947,10 @@ enum skl_power_gate {
 #define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? a : c)	/* ports A and C only */
 #define _MMIO_MIPI(port, a, c)	_MMIO(_MIPI_PORT(port, a, c))
 
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1)	_MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+						    dsi0, dsi1)
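+/*
+ * Since _PICK_EVEN(i, a, b) expands to a + i * (b - a), rebasing the
+ * transcoder to an index of 0 or 1 simply selects the DSI 0 or DSI 1
+ * register instance. Standalone check (enum values here are
+ * illustrative):
+ *
+ *	#include <stdio.h>
+ *
+ *	enum tc_sketch { TRANSCODER_DSI_0 = 5, TRANSCODER_DSI_1 };
+ *
+ *	static unsigned int pick_even(unsigned int i, unsigned int a,
+ *				      unsigned int b)
+ *	{
+ *		return a + i * (b - a);
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int conf0 = 0x6b030, conf1 = 0x6b830;
+ *		enum tc_sketch tc = TRANSCODER_DSI_1;
+ *		// prints 0x6b830: the DSI 1 instance of DSI_TRANS_FUNC_CONF
+ *		printf("%#x\n", pick_even(tc - TRANSCODER_DSI_0, conf0, conf1));
+ *		return 0;
+ *	}
+ */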
+
 #define MIPIO_TXESC_CLK_DIV1			_MMIO(0x160004)
 #define  GLK_TX_ESC_CLK_DIV1_MASK			0x3FF
 #define MIPIO_TXESC_CLK_DIV2			_MMIO(0x160008)
@@ -9956,6 +10114,39 @@ enum skl_power_gate {
 						    _ICL_DSI_IO_MODECTL_1)
 #define  COMBO_PHY_MODE_DSI				(1 << 0)
 
+/* Display Stream Splitter Control */
+#define DSS_CTL1				_MMIO(0x67400)
+#define  SPLITTER_ENABLE			(1 << 31)
+#define  JOINER_ENABLE				(1 << 30)
+#define  DUAL_LINK_MODE_INTERLEAVE		(1 << 24)
+#define  DUAL_LINK_MODE_FRONTBACK		(0 << 24)
+#define  OVERLAP_PIXELS_MASK			(0xf << 16)
+#define  OVERLAP_PIXELS(pixels)			((pixels) << 16)
+#define  LEFT_DL_BUF_TARGET_DEPTH_MASK		(0xfff << 0)
+#define  LEFT_DL_BUF_TARGET_DEPTH(pixels)	((pixels) << 0)
+#define  MAX_DL_BUFFER_TARGET_DEPTH		0x5a0
+
+#define DSS_CTL2				_MMIO(0x67404)
+#define  LEFT_BRANCH_VDSC_ENABLE		(1 << 31)
+#define  RIGHT_BRANCH_VDSC_ENABLE		(1 << 15)
+#define  RIGHT_DL_BUF_TARGET_DEPTH_MASK		(0xfff << 0)
+#define  RIGHT_DL_BUF_TARGET_DEPTH(pixels)	((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB			0x78200
+#define _ICL_PIPE_DSS_CTL1_PC			0x78400
+#define ICL_PIPE_DSS_CTL1(pipe)			_MMIO_PIPE((pipe) - PIPE_B, \
+							   _ICL_PIPE_DSS_CTL1_PB, \
+							   _ICL_PIPE_DSS_CTL1_PC)
+#define  BIG_JOINER_ENABLE			(1 << 29)
+#define  MASTER_BIG_JOINER_ENABLE		(1 << 28)
+#define  VGA_CENTERING_ENABLE			(1 << 27)
+
+#define _ICL_PIPE_DSS_CTL2_PB			0x78204
+#define _ICL_PIPE_DSS_CTL2_PC			0x78404
+#define ICL_PIPE_DSS_CTL2(pipe)			_MMIO_PIPE((pipe) - PIPE_B, \
+							   _ICL_PIPE_DSS_CTL2_PB, \
+							   _ICL_PIPE_DSS_CTL2_PC)
+
 #define BXT_P_DSI_REGULATOR_CFG			_MMIO(0x160020)
 #define  STAP_SELECT					(1 << 0)
 
@@ -10292,6 +10483,235 @@ enum skl_power_gate {
 						   _ICL_DSI_T_INIT_MASTER_0,\
 						   _ICL_DSI_T_INIT_MASTER_1)
 
+#define _DPHY_CLK_TIMING_PARAM_0	0x162180
+#define _DPHY_CLK_TIMING_PARAM_1	0x6c180
+#define DPHY_CLK_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DPHY_CLK_TIMING_PARAM_0,\
+						   _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0		0x6b080
+#define _DSI_CLK_TIMING_PARAM_1		0x6b880
+#define DSI_CLK_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DSI_CLK_TIMING_PARAM_0,\
+						   _DSI_CLK_TIMING_PARAM_1)
+#define  CLK_PREPARE_OVERRIDE		(1 << 31)
+#define  CLK_PREPARE(x)		((x) << 28)
+#define  CLK_PREPARE_MASK		(0x7 << 28)
+#define  CLK_PREPARE_SHIFT		28
+#define  CLK_ZERO_OVERRIDE		(1 << 27)
+#define  CLK_ZERO(x)			((x) << 20)
+#define  CLK_ZERO_MASK			(0xf << 20)
+#define  CLK_ZERO_SHIFT		20
+#define  CLK_PRE_OVERRIDE		(1 << 19)
+#define  CLK_PRE(x)			((x) << 16)
+#define  CLK_PRE_MASK			(0x3 << 16)
+#define  CLK_PRE_SHIFT			16
+#define  CLK_POST_OVERRIDE		(1 << 15)
+#define  CLK_POST(x)			((x) << 8)
+#define  CLK_POST_MASK			(0x7 << 8)
+#define  CLK_POST_SHIFT		8
+#define  CLK_TRAIL_OVERRIDE		(1 << 7)
+#define  CLK_TRAIL(x)			((x) << 0)
+#define  CLK_TRAIL_MASK		(0xf << 0)
+#define  CLK_TRAIL_SHIFT		0
+
+#define _DPHY_DATA_TIMING_PARAM_0	0x162184
+#define _DPHY_DATA_TIMING_PARAM_1	0x6c184
+#define DPHY_DATA_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DPHY_DATA_TIMING_PARAM_0,\
+						   _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0	0x6B084
+#define _DSI_DATA_TIMING_PARAM_1	0x6B884
+#define DSI_DATA_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DSI_DATA_TIMING_PARAM_0,\
+						   _DSI_DATA_TIMING_PARAM_1)
+#define  HS_PREPARE_OVERRIDE		(1 << 31)
+#define  HS_PREPARE(x)			((x) << 24)
+#define  HS_PREPARE_MASK		(0x7 << 24)
+#define  HS_PREPARE_SHIFT		24
+#define  HS_ZERO_OVERRIDE		(1 << 23)
+#define  HS_ZERO(x)			((x) << 16)
+#define  HS_ZERO_MASK			(0xf << 16)
+#define  HS_ZERO_SHIFT			16
+#define  HS_TRAIL_OVERRIDE		(1 << 15)
+#define  HS_TRAIL(x)			((x) << 8)
+#define  HS_TRAIL_MASK			(0x7 << 8)
+#define  HS_TRAIL_SHIFT		8
+#define  HS_EXIT_OVERRIDE		(1 << 7)
+#define  HS_EXIT(x)			((x) << 0)
+#define  HS_EXIT_MASK			(0x7 << 0)
+#define  HS_EXIT_SHIFT			0
+
+#define _DPHY_TA_TIMING_PARAM_0		0x162188
+#define _DPHY_TA_TIMING_PARAM_1		0x6c188
+#define DPHY_TA_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DPHY_TA_TIMING_PARAM_0,\
+						   _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0		0x6b098
+#define _DSI_TA_TIMING_PARAM_1		0x6b898
+#define DSI_TA_TIMING_PARAM(port)	_MMIO_PORT(port,	\
+						   _DSI_TA_TIMING_PARAM_0,\
+						   _DSI_TA_TIMING_PARAM_1)
+#define  TA_SURE_OVERRIDE		(1 << 31)
+#define  TA_SURE(x)			((x) << 16)
+#define  TA_SURE_MASK			(0x1f << 16)
+#define  TA_SURE_SHIFT			16
+#define  TA_GO_OVERRIDE		(1 << 15)
+#define  TA_GO(x)			((x) << 8)
+#define  TA_GO_MASK			(0xf << 8)
+#define  TA_GO_SHIFT			8
+#define  TA_GET_OVERRIDE		(1 << 7)
+#define  TA_GET(x)			((x) << 0)
+#define  TA_GET_MASK			(0xf << 0)
+#define  TA_GET_SHIFT			0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0		0x6b030
+#define _DSI_TRANS_FUNC_CONF_1		0x6b830
+#define DSI_TRANS_FUNC_CONF(tc)		_MMIO_DSI(tc,	\
+						  _DSI_TRANS_FUNC_CONF_0,\
+						  _DSI_TRANS_FUNC_CONF_1)
+#define  OP_MODE_MASK			(0x3 << 28)
+#define  OP_MODE_SHIFT			28
+#define  CMD_MODE_NO_GATE		(0x0 << 28)
+#define  CMD_MODE_TE_GATE		(0x1 << 28)
+#define  VIDEO_MODE_SYNC_EVENT		(0x2 << 28)
+#define  VIDEO_MODE_SYNC_PULSE		(0x3 << 28)
+#define  LINK_READY			(1 << 20)
+#define  PIX_FMT_MASK			(0x3 << 16)
+#define  PIX_FMT_SHIFT			16
+#define  PIX_FMT_RGB565			(0x0 << 16)
+#define  PIX_FMT_RGB666_PACKED		(0x1 << 16)
+#define  PIX_FMT_RGB666_LOOSE		(0x2 << 16)
+#define  PIX_FMT_RGB888			(0x3 << 16)
+#define  PIX_FMT_RGB101010		(0x4 << 16)
+#define  PIX_FMT_RGB121212		(0x5 << 16)
+#define  PIX_FMT_COMPRESSED		(0x6 << 16)
+#define  BGR_TRANSMISSION		(1 << 15)
+#define  PIX_VIRT_CHAN(x)		((x) << 12)
+#define  PIX_VIRT_CHAN_MASK		(0x3 << 12)
+#define  PIX_VIRT_CHAN_SHIFT		12
+#define  PIX_BUF_THRESHOLD_MASK		(0x3 << 10)
+#define  PIX_BUF_THRESHOLD_SHIFT	10
+#define  PIX_BUF_THRESHOLD_1_4		(0x0 << 10)
+#define  PIX_BUF_THRESHOLD_1_2		(0x1 << 10)
+#define  PIX_BUF_THRESHOLD_3_4		(0x2 << 10)
+#define  PIX_BUF_THRESHOLD_FULL		(0x3 << 10)
+#define  CONTINUOUS_CLK_MASK		(0x3 << 8)
+#define  CONTINUOUS_CLK_SHIFT		8
+#define  CLK_ENTER_LP_AFTER_DATA	(0x0 << 8)
+#define  CLK_HS_OR_LP			(0x2 << 8)
+#define  CLK_HS_CONTINUOUS		(0x3 << 8)
+#define  LINK_CALIBRATION_MASK		(0x3 << 4)
+#define  LINK_CALIBRATION_SHIFT		4
+#define  CALIBRATION_DISABLED		(0x0 << 4)
+#define  CALIBRATION_ENABLED_INITIAL_ONLY	(0x2 << 4)
+#define  CALIBRATION_ENABLED_INITIAL_PERIODIC	(0x3 << 4)
+#define  S3D_ORIENTATION_LANDSCAPE	(1 << 1)
+#define  EOTP_DISABLED			(1 << 0)
+
+#define _DSI_CMD_RXCTL_0		0x6b0d4
+#define _DSI_CMD_RXCTL_1		0x6b8d4
+#define DSI_CMD_RXCTL(tc)		_MMIO_DSI(tc,	\
+						  _DSI_CMD_RXCTL_0,\
+						  _DSI_CMD_RXCTL_1)
+#define  READ_UNLOADS_DW		(1 << 16)
+#define  RECEIVED_UNASSIGNED_TRIGGER	(1 << 15)
+#define  RECEIVED_ACKNOWLEDGE_TRIGGER	(1 << 14)
+#define  RECEIVED_TEAR_EFFECT_TRIGGER	(1 << 13)
+#define  RECEIVED_RESET_TRIGGER		(1 << 12)
+#define  RECEIVED_PAYLOAD_WAS_LOST	(1 << 11)
+#define  RECEIVED_CRC_WAS_LOST		(1 << 10)
+#define  NUMBER_RX_PLOAD_DW_MASK	(0xff << 0)
+#define  NUMBER_RX_PLOAD_DW_SHIFT	0
+
+#define _DSI_CMD_TXCTL_0		0x6b0d0
+#define _DSI_CMD_TXCTL_1		0x6b8d0
+#define DSI_CMD_TXCTL(tc)		_MMIO_DSI(tc,	\
+						  _DSI_CMD_TXCTL_0,\
+						  _DSI_CMD_TXCTL_1)
+#define  KEEP_LINK_IN_HS		(1 << 24)
+#define  FREE_HEADER_CREDIT_MASK	(0x1f << 8)
+#define  FREE_HEADER_CREDIT_SHIFT	8
+#define  FREE_PLOAD_CREDIT_MASK		(0xff << 0)
+#define  FREE_PLOAD_CREDIT_SHIFT	0
+#define  MAX_HEADER_CREDIT		0x10
+#define  MAX_PLOAD_CREDIT		0x40
+
+#define _DSI_CMD_TXHDR_0		0x6b100
+#define _DSI_CMD_TXHDR_1		0x6b900
+#define DSI_CMD_TXHDR(tc)		_MMIO_DSI(tc,	\
+						  _DSI_CMD_TXHDR_0,\
+						  _DSI_CMD_TXHDR_1)
+#define  PAYLOAD_PRESENT		(1 << 31)
+#define  LP_DATA_TRANSFER		(1 << 30)
+#define  VBLANK_FENCE			(1 << 29)
+#define  PARAM_WC_MASK			(0xffff << 8)
+#define  PARAM_WC_LOWER_SHIFT		8
+#define  PARAM_WC_UPPER_SHIFT		16
+#define  VC_MASK			(0x3 << 6)
+#define  VC_SHIFT			6
+#define  DT_MASK			(0x3f << 0)
+#define  DT_SHIFT			0
+
+#define _DSI_CMD_TXPYLD_0		0x6b104
+#define _DSI_CMD_TXPYLD_1		0x6b904
+#define DSI_CMD_TXPYLD(tc)		_MMIO_DSI(tc,	\
+						  _DSI_CMD_TXPYLD_0,\
+						  _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0			0x6b0d8
+#define _DSI_LP_MSG_1			0x6b8d8
+#define DSI_LP_MSG(tc)			_MMIO_DSI(tc,	\
+						  _DSI_LP_MSG_0,\
+						  _DSI_LP_MSG_1)
+#define  LPTX_IN_PROGRESS		(1 << 17)
+#define  LINK_IN_ULPS			(1 << 16)
+#define  LINK_ULPS_TYPE_LP11		(1 << 8)
+#define  LINK_ENTER_ULPS		(1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0			0x6b044
+#define _DSI_HSTX_TO_1			0x6b844
+#define DSI_HSTX_TO(tc)			_MMIO_DSI(tc,	\
+						  _DSI_HSTX_TO_0,\
+						  _DSI_HSTX_TO_1)
+#define  HSTX_TIMEOUT_VALUE_MASK	(0xffff << 16)
+#define  HSTX_TIMEOUT_VALUE_SHIFT	16
+#define  HSTX_TIMEOUT_VALUE(x)		((x) << 16)
+#define  HSTX_TIMED_OUT			(1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0		0x6b048
+#define _DSI_LPRX_HOST_TO_1		0x6b848
+#define DSI_LPRX_HOST_TO(tc)		_MMIO_DSI(tc,	\
+						  _DSI_LPRX_HOST_TO_0,\
+						  _DSI_LPRX_HOST_TO_1)
+#define  LPRX_TIMED_OUT			(1 << 16)
+#define  LPRX_TIMEOUT_VALUE_MASK	(0xffff << 0)
+#define  LPRX_TIMEOUT_VALUE_SHIFT	0
+#define  LPRX_TIMEOUT_VALUE(x)		((x) << 0)
+
+#define _DSI_PWAIT_TO_0			0x6b040
+#define _DSI_PWAIT_TO_1			0x6b840
+#define DSI_PWAIT_TO(tc)		_MMIO_DSI(tc,	\
+						  _DSI_PWAIT_TO_0,\
+						  _DSI_PWAIT_TO_1)
+#define  PRESET_TIMEOUT_VALUE_MASK	(0xffff << 16)
+#define  PRESET_TIMEOUT_VALUE_SHIFT	16
+#define  PRESET_TIMEOUT_VALUE(x)	((x) << 16)
+#define  PRESPONSE_TIMEOUT_VALUE_MASK	(0xffff << 0)
+#define  PRESPONSE_TIMEOUT_VALUE_SHIFT	0
+#define  PRESPONSE_TIMEOUT_VALUE(x)	((x) << 0)
+
+#define _DSI_TA_TO_0			0x6b04c
+#define _DSI_TA_TO_1			0x6b84c
+#define DSI_TA_TO(tc)			_MMIO_DSI(tc,	\
+						  _DSI_TA_TO_0,\
+						  _DSI_TA_TO_1)
+#define  TA_TIMED_OUT			(1 << 16)
+#define  TA_TIMEOUT_VALUE_MASK		(0xffff << 0)
+#define  TA_TIMEOUT_VALUE_SHIFT		0
+#define  TA_TIMEOUT_VALUE(x)		((x) << 0)
+
 /* bits 31:0 */
 #define _MIPIA_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb084)
 #define _MIPIC_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb884)
@@ -10404,10 +10824,6 @@ enum skl_power_gate {
 #define MIPI_READ_DATA_VALID(port)	_MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
 #define  READ_DATA_VALID(n)				(1 << (n))
 
-/* For UMS only (deprecated): */
-#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-
 /* MOCS (Memory Object Control State) registers */
 #define GEN9_LNCFCMOCS(i)	_MMIO(0xb020 + (i) * 4)	/* L3 Cache Control */
 
@@ -10693,6 +11109,7 @@ enum skl_power_gate {
 #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
 							   _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
 							   _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define  DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame)	((slice_row_per_frame) << 20)
 #define  DSC_SLICE_PER_LINE(slice_per_line)		((slice_per_line) << 16)
 #define  DSC_SLICE_CHUNK_SIZE(slice_chunk_size)		((slice_chunk_size) << 0)
 
@@ -10747,17 +11164,17 @@ enum skl_power_gate {
 						_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
 						_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
 
-#define PORT_TX_DFLEXDPSP			_MMIO(0x1638A0)
+#define PORT_TX_DFLEXDPSP			_MMIO(FIA1_BASE + 0x008A0)
 #define   TC_LIVE_STATE_TBT(tc_port)		(1 << ((tc_port) * 8 + 6))
 #define   TC_LIVE_STATE_TC(tc_port)		(1 << ((tc_port) * 8 + 5))
 #define   DP_LANE_ASSIGNMENT_SHIFT(tc_port)	((tc_port) * 8)
 #define   DP_LANE_ASSIGNMENT_MASK(tc_port)	(0xf << ((tc_port) * 8))
 #define   DP_LANE_ASSIGNMENT(tc_port, x)	((x) << ((tc_port) * 8))
 
-#define PORT_TX_DFLEXDPPMS				_MMIO(0x163890)
+#define PORT_TX_DFLEXDPPMS				_MMIO(FIA1_BASE + 0x00890)
 #define   DP_PHY_MODE_STATUS_COMPLETED(tc_port)		(1 << (tc_port))
 
-#define PORT_TX_DFLEXDPCSSS				_MMIO(0x163894)
+#define PORT_TX_DFLEXDPCSSS			_MMIO(FIA1_BASE + 0x00894)
 #define   DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)		(1 << (tc_port))
 
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385..ca95ab2 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
-	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
-		     struct i915_dependency *dep)
-{
-	kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
-				 struct i915_sched_node *signal,
-				 struct i915_dependency *dep,
-				 unsigned long flags)
-{
-	INIT_LIST_HEAD(&dep->dfs_link);
-	list_add(&dep->wait_link, &signal->waiters_list);
-	list_add(&dep->signal_link, &node->signalers_list);
-	dep->signaler = signal;
-	dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
-			       struct i915_sched_node *node,
-			       struct i915_sched_node *signal)
-{
-	struct i915_dependency *dep;
-
-	dep = i915_dependency_alloc(i915);
-	if (!dep)
-		return -ENOMEM;
-
-	__i915_sched_node_add_dependency(node, signal, dep,
-					 I915_DEPENDENCY_ALLOC);
-	return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
-		     struct i915_sched_node *node)
-{
-	struct i915_dependency *dep, *tmp;
-
-	GEM_BUG_ON(!list_empty(&node->link));
-
-	/*
-	 * Everyone we depended upon (the fences we wait to be signaled)
-	 * should retire before us and remove themselves from our list.
-	 * However, retirement is run independently on each timeline and
-	 * so we may be called out-of-order.
-	 */
-	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->wait_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-
-	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != node);
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->signal_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
-	INIT_LIST_HEAD(&node->signalers_list);
-	INIT_LIST_HEAD(&node->waiters_list);
-	INIT_LIST_HEAD(&node->link);
-	node->attr.priority = I915_PRIORITY_INVALID;
-}
-
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
 	struct intel_engine_cs *engine;
@@ -221,6 +136,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 			  intel_engine_get_seqno(engine),
 			  seqno);
 
+		if (seqno == engine->timeline.seqno)
+			continue;
+
+		kthread_park(engine->breadcrumbs.signaler);
+
 		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
 			/* Flush any waiters before we reuse the seqno */
 			intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +155,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 		/* Finally reset hw state */
 		intel_engine_init_global_seqno(engine, seqno);
 		engine->timeline.seqno = seqno;
+
+		kthread_unpark(engine->breadcrumbs.signaler);
 	}
 
 	list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +662,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		if (rq)
 			cond_synchronize_rcu(rq->rcustate);
 
-		/*
-		 * We've forced the client to stall and catch up with whatever
-		 * backlog there might have been. As we are assuming that we
-		 * caused the mempressure, now is an opportune time to
-		 * recover as much memory from the request pool as is possible.
-		 * Having already penalized the client to stall, we spend
-		 * a little extra time to re-optimise page allocation.
-		 */
-		kmem_cache_shrink(i915->requests);
-		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
-
 		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
 		if (!rq) {
 			ret = -ENOMEM;
@@ -1127,8 +1038,20 @@ void i915_request_add(struct i915_request *request)
 	 */
 	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
-	if (engine->schedule)
-		engine->schedule(request, &request->gem_context->sched);
+	if (engine->schedule) {
+		struct i915_sched_attr attr = request->gem_context->sched;
+
+		/*
+		 * Boost priorities to new clients (new request flows).
+		 *
+		 * Allow interactive/synchronous clients to jump ahead of
+		 * the bulk clients. (FQ_CODEL)
+		 */
+		if (!prev || i915_request_completed(prev))
+			attr.priority |= I915_PRIORITY_NEWCLIENT;
+
+		engine->schedule(request, &attr);
+	}
 	rcu_read_unlock();
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
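
The NEWCLIENT boost above folds an internal bump bit into the context's base
priority. A minimal sketch of how the two combine, mirroring the
I915_USER_PRIORITY_SHIFT == 2 encoding this series introduces (the standalone
names and values below are illustrative, not the driver's headers):

static int effective_priority(int user_prio, bool new_client, bool waited)
{
	/* user priority occupies the high bits... */
	int prio = user_prio << 2;		/* I915_USER_PRIORITY() */

	/* ...internal bumps fill the low bits beneath it */
	if (new_client)
		prio |= 1 << 1;			/* I915_PRIORITY_NEWCLIENT */
	if (waited)
		prio |= 1 << 0;			/* I915_PRIORITY_WAIT */

	return prio;	/* user 0 + new client => 2, still below user 1 (4) */
}

Because the bumps live below the shift, a boosted request can never jump past
a genuinely higher user priority level.
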
@@ -1310,6 +1233,8 @@ long i915_request_wait(struct i915_request *rq,
 		add_wait_queue(errq, &reset);
 
 	intel_wait_init(&wait);
+	if (flags & I915_WAIT_PRIORITY)
+		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
 
 restart:
 	do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b0..90e9d17 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
 	__attribute__((nonnull(1)));
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
+#define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
+#define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
 
 static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 					    u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
 	return __i915_request_completed(rq, seqno);
 }
 
-static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
-{
-	const struct i915_request *rq =
-		container_of(node, const struct i915_request, sched);
-
-	return i915_request_completed(rq);
-}
-
 void i915_retire_requests(struct drm_i915_private *i915);
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 0000000..340faea
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/mutex.h>
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+static DEFINE_SPINLOCK(schedule_lock);
+
+static const struct i915_request *
+node_to_request(const struct i915_sched_node *node)
+{
+	return container_of(node, const struct i915_request, sched);
+}
+
+static inline bool node_signaled(const struct i915_sched_node *node)
+{
+	return i915_request_completed(node_to_request(node));
+}
+
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+	INIT_LIST_HEAD(&node->signalers_list);
+	INIT_LIST_HEAD(&node->waiters_list);
+	INIT_LIST_HEAD(&node->link);
+	node->attr.priority = I915_PRIORITY_INVALID;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+		     struct i915_dependency *dep)
+{
+	kmem_cache_free(i915->dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+				      struct i915_sched_node *signal,
+				      struct i915_dependency *dep,
+				      unsigned long flags)
+{
+	bool ret = false;
+
+	spin_lock(&schedule_lock);
+
+	if (!node_signaled(signal)) {
+		INIT_LIST_HEAD(&dep->dfs_link);
+		list_add(&dep->wait_link, &signal->waiters_list);
+		list_add(&dep->signal_link, &node->signalers_list);
+		dep->signaler = signal;
+		dep->flags = flags;
+
+		ret = true;
+	}
+
+	spin_unlock(&schedule_lock);
+
+	return ret;
+}
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+				   struct i915_sched_node *node,
+				   struct i915_sched_node *signal)
+{
+	struct i915_dependency *dep;
+
+	dep = i915_dependency_alloc(i915);
+	if (!dep)
+		return -ENOMEM;
+
+	if (!__i915_sched_node_add_dependency(node, signal, dep,
+					      I915_DEPENDENCY_ALLOC))
+		i915_dependency_free(i915, dep);
+
+	return 0;
+}
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+			  struct i915_sched_node *node)
+{
+	struct i915_dependency *dep, *tmp;
+
+	GEM_BUG_ON(!list_empty(&node->link));
+
+	spin_lock(&schedule_lock);
+
+	/*
+	 * Everyone we depended upon (the fences we wait to be signaled)
+	 * should retire before us and remove themselves from our list.
+	 * However, retirement is run independently on each timeline and
+	 * so we may be called out-of-order.
+	 */
+	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+		GEM_BUG_ON(!node_signaled(dep->signaler));
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+		list_del(&dep->wait_link);
+		if (dep->flags & I915_DEPENDENCY_ALLOC)
+			i915_dependency_free(i915, dep);
+	}
+
+	/* Remove ourselves from everyone who depends upon us */
+	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+		GEM_BUG_ON(dep->signaler != node);
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+		list_del(&dep->signal_link);
+		if (dep->flags & I915_DEPENDENCY_ALLOC)
+			i915_dependency_free(i915, dep);
+	}
+
+	spin_unlock(&schedule_lock);
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+	return rb_entry(rb, struct i915_priolist, node);
+}
+
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+			     long queue_priority)
+{
+	struct rb_node *rb;
+	long last_prio, i;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+		   rb_first(&execlists->queue.rb_root));
+
+	last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+		const struct i915_priolist *p = to_priolist(rb);
+
+		GEM_BUG_ON(p->priority >= last_prio);
+		last_prio = p->priority;
+
+		GEM_BUG_ON(!p->used);
+		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+			if (list_empty(&p->requests[i]))
+				continue;
+
+			GEM_BUG_ON(!(p->used & BIT(i)));
+		}
+	}
+}
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_priolist *p;
+	struct rb_node **parent, *rb;
+	bool first = true;
+	int idx, i;
+
+	lockdep_assert_held(&engine->timeline.lock);
+	assert_priolists(execlists, INT_MAX);
+
+	/* buckets sorted from highest [in slot 0] to lowest priority */
+	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+	prio >>= I915_USER_PRIORITY_SHIFT;
+	if (unlikely(execlists->no_priolist))
+		prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+	/* most positive priority is scheduled first, equal priorities fifo */
+	rb = NULL;
+	parent = &execlists->queue.rb_root.rb_node;
+	while (*parent) {
+		rb = *parent;
+		p = to_priolist(rb);
+		if (prio > p->priority) {
+			parent = &rb->rb_left;
+		} else if (prio < p->priority) {
+			parent = &rb->rb_right;
+			first = false;
+		} else {
+			goto out;
+		}
+	}
+
+	if (prio == I915_PRIORITY_NORMAL) {
+		p = &execlists->default_priolist;
+	} else {
+		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+		/* Convert an allocation failure to a priority bump */
+		if (unlikely(!p)) {
+			prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+			/* To maintain ordering with all rendering, after an
+			 * allocation failure we have to disable all scheduling.
+			 * Requests will then be executed in fifo, and schedule
+			 * will ensure that dependencies are emitted in fifo.
+			 * There will be still some reordering with existing
+			 * requests, so if userspace lied about their
+			 * dependencies that reordering may be visible.
+			 */
+			execlists->no_priolist = true;
+			goto find_priolist;
+		}
+	}
+
+	p->priority = prio;
+	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+		INIT_LIST_HEAD(&p->requests[i]);
+	rb_link_node(&p->node, rb, parent);
+	rb_insert_color_cached(&p->node, &execlists->queue, first);
+	p->used = 0;
+
+out:
+	p->used |= BIT(idx);
+	return &p->requests[idx];
+}
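
To make the bucket selection in i915_sched_lookup_priolist() concrete, a
worked example of the index arithmetic under the SHIFT == 2 encoding (so
I915_PRIORITY_COUNT == 4 and I915_PRIORITY_MASK == 0x3); the sample priority
is illustrative:

/*
 * prio = I915_USER_PRIORITY(1) | I915_PRIORITY_NEWCLIENT
 *      = (1 << 2) | (1 << 1) = 6
 *
 * idx  = 4 - (6 & 0x3) - 1 = 1     ->  p->requests[1]
 * prio = 6 >> 2            = 1     ->  priolist for user level 1
 *
 * A larger internal bump (bigger low bits) yields a smaller idx,
 * placing the request nearer slot 0, which is drained first.
 */
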
+
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+	struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+	GEM_BUG_ON(!locked);
+
+	if (engine != locked) {
+		spin_unlock(&locked->timeline.lock);
+		spin_lock(&engine->timeline.lock);
+	}
+
+	return engine;
+}
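
sched_lock_engine() above implements hand-over-hand locking for the
cross-engine dependency walk: exactly one engine timeline lock is held at any
instant. A generic sketch of the pattern, with a hypothetical lock-owner type
standing in for intel_engine_cs:

#include <linux/spinlock.h>

struct owner { spinlock_t lock; };	/* hypothetical */

/* Swap which owner's lock we hold, never holding zero or two. */
static struct owner *relock(struct owner *next, struct owner *held)
{
	if (next != held) {
		spin_unlock(&held->lock);
		spin_lock(&next->lock);
	}
	return next;	/* caller records the lock now held */
}
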
+
+static void __i915_schedule(struct i915_request *rq,
+			    const struct i915_sched_attr *attr)
+{
+	struct list_head *uninitialized_var(pl);
+	struct intel_engine_cs *engine, *last;
+	struct i915_dependency *dep, *p;
+	struct i915_dependency stack;
+	const int prio = attr->priority;
+	LIST_HEAD(dfs);
+
+	/* Needed in order to use the temporary link inside i915_dependency */
+	lockdep_assert_held(&schedule_lock);
+	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+	if (i915_request_completed(rq))
+		return;
+
+	if (prio <= READ_ONCE(rq->sched.attr.priority))
+		return;
+
+	stack.signaler = &rq->sched;
+	list_add(&stack.dfs_link, &dfs);
+
+	/*
+	 * Recursively bump all dependent priorities to match the new request.
+	 *
+	 * A naive approach would be to use recursion:
+	 * static void update_priorities(struct i915_sched_node *node, prio) {
+	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
+	 *		update_priorities(dep->signal, prio)
+	 *	queue_request(node);
+	 * }
+	 * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+	 * a flat list of all dependencies starting with the current request.
+	 * As we walk the list of dependencies, we add all of its dependencies
+	 * to the end of the list (this may include an already visited
+	 * request) and continue to walk onwards onto the new dependencies. The
+	 * end result is a topological list of requests in reverse order, the
+	 * last element in the list is the request we must execute first.
+	 */
+	list_for_each_entry(dep, &dfs, dfs_link) {
+		struct i915_sched_node *node = dep->signaler;
+
+		/*
+		 * Within an engine, there can be no cycle, but we may
+		 * refer to the same dependency chain multiple times
+		 * (redundant dependencies are not eliminated) and across
+		 * engines.
+		 */
+		list_for_each_entry(p, &node->signalers_list, signal_link) {
+			GEM_BUG_ON(p == dep); /* no cycles! */
+
+			if (node_signaled(p->signaler))
+				continue;
+
+			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
+				list_move_tail(&p->dfs_link, &dfs);
+		}
+	}
+
+	/*
+	 * If we didn't need to bump any existing priorities, and we haven't
+	 * yet submitted this request (i.e. there is no potential race with
+	 * execlists_submit_request()), we can set our own priority and skip
+	 * acquiring the engine locks.
+	 */
+	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&rq->sched.link));
+		rq->sched.attr = *attr;
+
+		if (stack.dfs_link.next == stack.dfs_link.prev)
+			return;
+
+		__list_del_entry(&stack.dfs_link);
+	}
+
+	last = NULL;
+	engine = rq->engine;
+	spin_lock_irq(&engine->timeline.lock);
+
+	/* Fifo and depth-first replacement ensure our deps execute before us */
+	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+		struct i915_sched_node *node = dep->signaler;
+
+		INIT_LIST_HEAD(&dep->dfs_link);
+
+		engine = sched_lock_engine(node, engine);
+
+		/* Recheck after acquiring the engine->timeline.lock */
+		if (prio <= node->attr.priority || node_signaled(node))
+			continue;
+
+		node->attr.priority = prio;
+		if (!list_empty(&node->link)) {
+			if (last != engine) {
+				pl = i915_sched_lookup_priolist(engine, prio);
+				last = engine;
+			}
+			list_move_tail(&node->link, pl);
+		} else {
+			/*
+			 * If the request is not in the priolist queue because
+			 * it is not yet runnable, then it doesn't contribute
+			 * to our preemption decisions. On the other hand,
+			 * if the request is on the HW, it too is not in the
+			 * queue; but in that case we may still need to reorder
+			 * the inflight requests.
+			 */
+			if (!i915_sw_fence_done(&node_to_request(node)->submit))
+				continue;
+		}
+
+		if (prio <= engine->execlists.queue_priority)
+			continue;
+
+		/*
+		 * If we are already the currently executing context, don't
+		 * bother evaluating if we should preempt ourselves.
+		 */
+		if (node_to_request(node)->global_seqno &&
+		    i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
+				      node_to_request(node)->global_seqno))
+			continue;
+
+		/* Defer (tasklet) submission until after all of our updates. */
+		engine->execlists.queue_priority = prio;
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+	}
+
+	spin_unlock_irq(&engine->timeline.lock);
+}
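
The long comment in __i915_schedule() explains why the dependency walk avoids
recursion. A self-contained sketch of the stack-safe flattening idea, using a
hypothetical fixed-size worklist in place of the driver's dfs list (the driver
additionally preserves a reverse-topological apply order, which this sketch
omits):

#define MAX_NODES 64	/* sketch only; the driver's list grows unbounded */

struct node {
	struct node *signalers[4];	/* what this node waits on */
	int nr_signalers;
	int priority;
};

static void bump_priority(struct node *start, int prio)
{
	struct node *worklist[MAX_NODES];
	int head = 0, tail = 0, i;

	worklist[tail++] = start;

	/*
	 * Visiting a node appends its still-lower-priority dependencies,
	 * so the walk is iterative and depth costs no kernel stack.
	 */
	while (head < tail) {
		struct node *n = worklist[head++];

		if (n->priority >= prio)
			continue;	/* already at least this urgent */
		n->priority = prio;

		for (i = 0; i < n->nr_signalers; i++)
			if (tail < MAX_NODES)
				worklist[tail++] = n->signalers[i];
	}
}
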
+
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+	spin_lock(&schedule_lock);
+	__i915_schedule(rq, attr);
+	spin_unlock(&schedule_lock);
+}
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+	struct i915_sched_attr attr;
+
+	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
+
+	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+		return;
+
+	spin_lock_bh(&schedule_lock);
+
+	attr = rq->sched.attr;
+	attr.priority |= bump;
+	__i915_schedule(rq, &attr);
+
+	spin_unlock_bh(&schedule_lock);
+}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a4222..dbe9cb7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
 #define _I915_SCHEDULER_H_
 
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 
 #include <uapi/drm/i915_drm.h>
 
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
 enum {
 	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
 	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
+#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT	((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT	((u8)BIT(1))
+
 struct i915_sched_attr {
 	/**
 	 * @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
 #define I915_DEPENDENCY_ALLOC BIT(0)
 };
 
+void i915_sched_node_init(struct i915_sched_node *node);
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+				      struct i915_sched_node *signal,
+				      struct i915_dependency *dep,
+				      unsigned long flags);
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+				   struct i915_sched_node *node,
+				   struct i915_sched_node *signal);
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+			  struct i915_sched_node *node);
+
+void i915_schedule(struct i915_request *request,
+		   const struct i915_sched_attr *attr);
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+
 #endif /* _I915_SCHEDULER_H_ */
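
A hypothetical caller of the interface exported above, sketching the node
lifecycle (error handling trimmed; the function name and flow are
illustrative, not taken from the driver):

static int example_request_setup(struct drm_i915_private *i915,
				 struct i915_request *rq,
				 struct i915_request *prereq)
{
	/* once, when the request is created */
	i915_sched_node_init(&rq->sched);

	/* rq must not run before prereq signals */
	return i915_sched_node_add_dependency(i915, &rq->sched,
					      &prereq->sched);
}

Later, when a waiter blocks on the request,
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT) nudges it (and,
transitively, its signalers) ahead, as the new I915_WAIT_PRIORITY path in
i915_request_wait() does.
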
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed0..fc2eeab 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
 /*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
  */
 
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4d..0e055ea 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
 /*
+ * SPDX-License-Identifier: MIT
+ *
  * i915_sw_fence.h - library routines for N:M synchronisation points
  *
  * Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
  */
 
 #ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0c..60404db 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
 {
 	BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
 	BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
-	BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+	BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
 	*root = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6b..535caeb 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };
 
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_i915_error_state_buf error_str;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	struct i915_gpu_state *gpu;
 	ssize_t ret;
 
-	ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
-	if (ret)
-		return ret;
+	gpu = i915_first_error_state(i915);
+	if (gpu) {
+		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+		i915_gpu_state_put(gpu);
+	} else {
+		const char *str = "No error state collected\n";
+		size_t len = strlen(str);
 
-	gpu = i915_first_error_state(dev_priv);
-	ret = i915_error_state_to_str(&error_str, gpu);
-	if (ret)
-		goto out;
-
-	ret = count < error_str.bytes ? count : error_str.bytes;
-	memcpy(buf, error_str.buf, ret);
-
-out:
-	i915_gpu_state_put(gpu);
-	i915_error_state_buf_release(&error_str);
+		ret = min_t(size_t, count, len - off);
+		memcpy(buf, str + off, ret);
+	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab..ebd71b4 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
 			const char *name);
 void i915_timeline_fini(struct i915_timeline *tl);
 
+static inline void
+i915_timeline_set_subclass(struct i915_timeline *timeline,
+			   unsigned int subclass)
+{
+	lockdep_set_subclass(&timeline->lock, subclass);
+
+	/*
+	 * Due to an interesting quirk in lockdep's internal debug tracking,
+	 * after setting a subclass we must ensure the lock is used. Otherwise,
+	 * nr_unused_locks is incremented once too often.
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	local_irq_disable();
+	lock_map_acquire(&timeline->lock.dep_map);
+	lock_map_release(&timeline->lock.dep_map);
+	local_irq_enable();
+#endif
+}
+
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915, const char *name);
 
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd25..9726df3 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
 			     __stringify(x), (long)(x))
 
 #if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
-	__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+	__builtin_add_overflow_p((A), (B), (T)0)
 #else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
 	typeof(A) a = (A); \
 	typeof(B) b = (B); \
-	a + b < a; \
+	(T)(a + b) < a; \
 })
 #endif
 
+#define add_overflows(A, B) \
+	add_overflows_t(typeof((A) + (B)), (A), (B))
+
 #define range_overflows(start, size, max) ({ \
 	typeof(start) start__ = (start); \
 	typeof(size) size__ = (size); \
@@ -68,7 +71,7 @@
 
 /* Note we don't consider signbits :| */
 #define overflows_type(x, T) \
-	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
 
 #define ptr_mask_bits(ptr, n) ({					\
 	unsigned long __v = (unsigned long)(ptr);			\
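
A short usage sketch for the reworked helper: add_overflows_t() takes the
result type explicitly, so a caller can check against a narrower type than
the promoted sum (the function name and values here are illustrative):

static int check_range(u32 offset, u32 len)
{
	/*
	 * e.g. offset = 0xfffffff0, len = 0x20: the mathematical sum
	 * 0x1_0000_0010 does not fit in u32, so this rejects it even
	 * though the expression (offset + len) would silently wrap.
	 */
	if (add_overflows_t(u32, offset, len))
		return -EINVAL;

	return 0;
}
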
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 31efc97..5b4d78c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->size > vma->node.size);
 
-	if (GEM_WARN_ON(range_overflows(vma->node.start,
-					vma->node.size,
-					vma->vm->total)))
+	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
+					      vma->node.size,
+					      vma->vm->total)))
 		return -ENODEV;
 
-	if (GEM_WARN_ON(!flags))
+	if (GEM_DEBUG_WARN_ON(!flags))
 		return -EINVAL;
 
 	bind_flags = 0;
@@ -892,7 +892,7 @@ static void export_fence(struct i915_vma *vma,
 	reservation_object_lock(resv, NULL);
 	if (flags & EXEC_OBJECT_WRITE)
 		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
+	else if (reservation_object_reserve_shared(resv, 1) == 0)
 		reservation_object_add_shared_fence(resv, &rq->fence);
 	reservation_object_unlock(resv);
 }
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e4..4dd793b 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,277 @@
  *   Jani Nikula <jani.nikula@intel.com>
  */
 
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
 #include "intel_dsi.h"
 
+static inline int header_credits_available(struct drm_i915_private *dev_priv,
+					   enum transcoder dsi_trans)
+{
+	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+		>> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+					    enum transcoder dsi_trans)
+{
+	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+		>> FREE_PLOAD_CREDIT_SHIFT;
+}
+
+static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+				    enum transcoder dsi_trans)
+{
+	if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+			MAX_HEADER_CREDIT, 100))
+		DRM_ERROR("DSI header credits not released\n");
+}
+
+static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+				     enum transcoder dsi_trans)
+{
+	if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+			MAX_PLOAD_CREDIT, 100))
+		DRM_ERROR("DSI payload credits not released\n");
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+	if (port == PORT_A)
+		return TRANSCODER_DSI_0;
+	else
+		return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct mipi_dsi_device *dsi;
+	enum port port;
+	enum transcoder dsi_trans;
+	int ret;
+
+	/* wait for header/payload credits to be released */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		wait_for_header_credits(dev_priv, dsi_trans);
+		wait_for_payload_credits(dev_priv, dsi_trans);
+	}
+
+	/* send nop DCS command */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi = intel_dsi->dsi_hosts[port]->device;
+		dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+		dsi->channel = 0;
+		ret = mipi_dsi_dcs_nop(dsi);
+		if (ret < 0)
+			DRM_ERROR("error sending DCS NOP command\n");
+	}
+
+	/* wait for header credits to be released */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		wait_for_header_credits(dev_priv, dsi_trans);
+	}
+
+	/* wait for LP TX in progress bit to be cleared */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+				  LPTX_IN_PROGRESS), 20))
+			DRM_ERROR("LPTX bit not cleared\n");
+	}
+}
+
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+			       u32 len)
+{
+	struct intel_dsi *intel_dsi = host->intel_dsi;
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+	int free_credits;
+	int i, j;
+
+	for (i = 0; i < len; i += 4) {
+		u32 tmp = 0;
+
+		free_credits = payload_credits_available(dev_priv, dsi_trans);
+		if (free_credits < 1) {
+			DRM_ERROR("Payload credit not available\n");
+			return false;
+		}
+
+		for (j = 0; j < min_t(u32, len - i, 4); j++)
+			tmp |= *data++ << 8 * j;
+
+		I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+	}
+
+	return true;
+}
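
The loop in add_payld_to_queue() packs the payload least-significant byte
first into each 32-bit write. A worked example with an illustrative ASCII
payload:

/*
 * Packing "ABCD" (0x41 0x42 0x43 0x44) via tmp |= *data++ << 8 * j:
 *
 *   j = 0: tmp = 0x00000041
 *   j = 1: tmp = 0x00004241
 *   j = 2: tmp = 0x00434241
 *   j = 3: tmp = 0x44434241   ->  one DSI_CMD_TXPYLD write
 *
 * A trailing partial group (len % 4 != 0) leaves the upper bytes 0,
 * and each 4-byte group needs one free payload credit.
 */
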
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+			    struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+	struct intel_dsi *intel_dsi = host->intel_dsi;
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+	u32 tmp;
+	int free_credits;
+
+	/* check if header credit available */
+	free_credits = header_credits_available(dev_priv, dsi_trans);
+	if (free_credits < 1) {
+		DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+		return -1;
+	}
+
+	tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+	if (pkt.payload)
+		tmp |= PAYLOAD_PRESENT;
+	else
+		tmp &= ~PAYLOAD_PRESENT;
+
+	tmp &= ~VBLANK_FENCE;
+
+	if (enable_lpdt)
+		tmp |= LP_DATA_TRANSFER;
+
+	tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+	tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+	tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+	tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+	tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+	I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+	return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+			      struct mipi_dsi_packet pkt)
+{
+	/* payload queue can accept a maximum of 256 bytes, check limit */
+	if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+		DRM_ERROR("payload size exceeds max queue limit\n");
+		return -1;
+	}
+
+	/* load data into command payload queue */
+	if (!add_payld_to_queue(host, pkt.payload,
+				pkt.payload_length)) {
+		DRM_ERROR("adding payload to queue failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 tmp;
+	int lane;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+
+		/*
+		 * Program voltage swing and pre-emphasis level values as per
+		 * the table in BSPEC under DDI buffer programming
+		 */
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+		tmp |= SCALING_MODE_SEL(0x2);
+		tmp |= TAP2_DISABLE | TAP3_DISABLE;
+		tmp |= RTERM_SELECT(0x6);
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+		tmp |= SCALING_MODE_SEL(0x2);
+		tmp |= TAP2_DISABLE | TAP3_DISABLE;
+		tmp |= RTERM_SELECT(0x6);
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+
+		tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+		tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+			 RCOMP_SCALAR_MASK);
+		tmp |= SWING_SEL_UPPER(0x2);
+		tmp |= SWING_SEL_LOWER(0x2);
+		tmp |= RCOMP_SCALAR(0x98);
+		I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+		tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+		tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+			 RCOMP_SCALAR_MASK);
+		tmp |= SWING_SEL_UPPER(0x2);
+		tmp |= SWING_SEL_LOWER(0x2);
+		tmp |= RCOMP_SCALAR(0x98);
+		I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+
+		tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+		tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+			 CURSOR_COEFF_MASK);
+		tmp |= POST_CURSOR_1(0x0);
+		tmp |= POST_CURSOR_2(0x0);
+		tmp |= CURSOR_COEFF(0x3f);
+		I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+
+		for (lane = 0; lane <= 3; lane++) {
+			/* Bspec: must not use GRP register for write */
+			tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+			tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+				 CURSOR_COEFF_MASK);
+			tmp |= POST_CURSOR_1(0x0);
+			tmp |= POST_CURSOR_2(0x0);
+			tmp |= CURSOR_COEFF(0x3f);
+			I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+		}
+	}
+}
+
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 dss_ctl1;
+
+	dss_ctl1 = I915_READ(DSS_CTL1);
+	dss_ctl1 |= SPLITTER_ENABLE;
+	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+		const struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+		u32 dss_ctl2;
+		u16 hactive = adjusted_mode->crtc_hdisplay;
+		u16 dl_buffer_depth;
+
+		dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+		dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+		if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+			DRM_ERROR("DL buffer depth exceed max value\n");
+
+		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+		dss_ctl2 = I915_READ(DSS_CTL2);
+		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+		I915_WRITE(DSS_CTL2, dss_ctl2);
+	} else {
+		/* Interleave */
+		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+	}
+
+	I915_WRITE(DSS_CTL1, dss_ctl1);
+}
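
For the front-back split above, a worked example of the buffer depth
computation (the panel numbers are illustrative):

/*
 * hactive = 1920, pixel_overlap = 4:
 *
 *   dl_buffer_depth = 1920 / 2 + 4 = 964
 *
 * Each link fetches half the line plus the overlap region, and the
 * same depth is written to DSS_CTL1 (left) and DSS_CTL2 (right).
 */
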
+
 static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,23 +374,1079 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
 	}
 }
 
-static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 tmp;
+	int lane;
+
+	/* Step 4b(i) set loadgen select for transmit and aux lanes */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+		tmp &= ~LOADGEN_SELECT;
+		I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+		for (lane = 0; lane <= 3; lane++) {
+			tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+			tmp &= ~LOADGEN_SELECT;
+			if (lane != 2)
+				tmp |= LOADGEN_SELECT;
+			I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+		}
+	}
+
+	/* Step 4b(ii) set latency optimization for transmit and aux lanes */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+		tmp &= ~FRC_LATENCY_OPTIM_MASK;
+		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+		I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+		tmp &= ~FRC_LATENCY_OPTIM_MASK;
+		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+		I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+	}
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	/* clear common keeper enable bit */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+		tmp &= ~COMMON_KEEPER_EN;
+		I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+		tmp &= ~COMMON_KEEPER_EN;
+		I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+	}
+
+	/*
+	 * Set SUS Clock Config bitfield to 11b
+	 * Note: loadgen select programming is done
+	 * as part of the lane phy sequence configuration
+	 */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_CL_DW5(port));
+		tmp |= SUS_CLOCK_CONFIG;
+		I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+	}
+
+	/* Clear training enable to change swing values */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp &= ~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp &= ~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+
+	/* Program swing and de-emphasis */
+	dsi_program_swing_and_deemphasis(encoder);
+
+	/* Set training enable to trigger update */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp |= DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+		if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+				  DDI_BUF_IS_IDLE),
+				  500))
+			DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+	}
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	/* Program T-INIT master registers */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+		tmp &= ~MASTER_INIT_TIMER_MASK;
+		tmp |= intel_dsi->init_count;
+		I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+	}
+
+	/* Program DPHY clock lanes timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+	}
+
+	/* Program DPHY data lanes timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+	}
+
+	/*
+	 * If the DSI link is operating at or below 800 MHz,
+	 * TA_SURE should be overridden and programmed to
+	 * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+	 * leave all fields at HW default values.
+	 */
+	if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+		for_each_dsi_port(port, intel_dsi->ports) {
+			tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+			tmp &= ~TA_SURE_MASK;
+			tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+			I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+			/* shadow register inside display core */
+			tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+			tmp &= ~TA_SURE_MASK;
+			tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+			I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+		}
+	}
+}
+
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+
+	I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+
+	I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+	enum port port;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpll_lock);
+
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+	}
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+	POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 tmp;
+	enum port port;
+	enum transcoder dsi_trans;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+
+		if (intel_dsi->eotp_pkt)
+			tmp &= ~EOTP_DISABLED;
+		else
+			tmp |= EOTP_DISABLED;
+
+		/* enable link calibration if freq >= 1.5 Gbps */
+		if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+			tmp &= ~LINK_CALIBRATION_MASK;
+			tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+		}
+
+		/* configure continuous clock */
+		tmp &= ~CONTINUOUS_CLK_MASK;
+		if (intel_dsi->clock_stop)
+			tmp |= CLK_ENTER_LP_AFTER_DATA;
+		else
+			tmp |= CLK_HS_CONTINUOUS;
+
+		/* configure buffer threshold limit to minimum */
+		tmp &= ~PIX_BUF_THRESHOLD_MASK;
+		tmp |= PIX_BUF_THRESHOLD_1_4;
+
+		/* set virtual channel to '0' */
+		tmp &= ~PIX_VIRT_CHAN_MASK;
+		tmp |= PIX_VIRT_CHAN(0);
+
+		/* program BGR transmission */
+		if (intel_dsi->bgr_enabled)
+			tmp |= BGR_TRANSMISSION;
+
+		/* select pixel format */
+		tmp &= ~PIX_FMT_MASK;
+		switch (intel_dsi->pixel_format) {
+		default:
+			MISSING_CASE(intel_dsi->pixel_format);
+			/* fallthrough */
+		case MIPI_DSI_FMT_RGB565:
+			tmp |= PIX_FMT_RGB565;
+			break;
+		case MIPI_DSI_FMT_RGB666_PACKED:
+			tmp |= PIX_FMT_RGB666_PACKED;
+			break;
+		case MIPI_DSI_FMT_RGB666:
+			tmp |= PIX_FMT_RGB666_LOOSE;
+			break;
+		case MIPI_DSI_FMT_RGB888:
+			tmp |= PIX_FMT_RGB888;
+			break;
+		}
+
+		/* program DSI operation mode */
+		if (is_vid_mode(intel_dsi)) {
+			tmp &= ~OP_MODE_MASK;
+			switch (intel_dsi->video_mode_format) {
+			default:
+				MISSING_CASE(intel_dsi->video_mode_format);
+				/* fallthrough */
+			case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+				tmp |= VIDEO_MODE_SYNC_EVENT;
+				break;
+			case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+				tmp |= VIDEO_MODE_SYNC_PULSE;
+				break;
+			}
+		}
+
+		I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+	}
+
+	/* enable port sync mode if dual link */
+	if (intel_dsi->dual_link) {
+		for_each_dsi_port(port, intel_dsi->ports) {
+			dsi_trans = dsi_port_to_transcoder(port);
+			tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+			tmp |= PORT_SYNC_MODE_ENABLE;
+			I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+		}
+
+		/* configure stream splitting */
+		configure_dual_link_mode(encoder, pipe_config);
+	}
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+
+		/* select data lane width */
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		tmp &= ~DDI_PORT_WIDTH_MASK;
+		tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+		/* select input pipe */
+		tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+		switch (pipe) {
+		default:
+			MISSING_CASE(pipe);
+			/* fallthrough */
+		case PIPE_A:
+			tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+			break;
+		case PIPE_B:
+			tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+			break;
+		case PIPE_C:
+			tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+			break;
+		}
+
+		/* enable DDI buffer */
+		tmp |= TRANS_DDI_FUNC_ENABLE;
+		I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+	}
+
+	/* wait for link ready */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
+				LINK_READY), 2500))
+			DRM_ERROR("DSI link not ready\n");
+	}
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	const struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+	enum port port;
+	enum transcoder dsi_trans;
+	/* horizontal timings */
+	u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+	u16 hfront_porch, hback_porch;
+	/* vertical timings */
+	u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+
+	hactive = adjusted_mode->crtc_hdisplay;
+	htotal = adjusted_mode->crtc_htotal;
+	hsync_start = adjusted_mode->crtc_hsync_start;
+	hsync_end = adjusted_mode->crtc_hsync_end;
+	hsync_size  = hsync_end - hsync_start;
+	hfront_porch = (adjusted_mode->crtc_hsync_start -
+			adjusted_mode->crtc_hdisplay);
+	hback_porch = (adjusted_mode->crtc_htotal -
+		       adjusted_mode->crtc_hsync_end);
+	vactive = adjusted_mode->crtc_vdisplay;
+	vtotal = adjusted_mode->crtc_vtotal;
+	vsync_start = adjusted_mode->crtc_vsync_start;
+	vsync_end = adjusted_mode->crtc_vsync_end;
+	vsync_shift = hsync_start - htotal / 2;
+
+	if (intel_dsi->dual_link) {
+		hactive /= 2;
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+			hactive += intel_dsi->pixel_overlap;
+		htotal /= 2;
+	}
+
+	/* minimum hactive as per bspec: 256 pixels */
+	if (adjusted_mode->crtc_hdisplay < 256)
+		DRM_ERROR("hactive is less then 256 pixels\n");
+
+	/* if RGB666 format, then hactive must be multiple of 4 pixels */
+	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+		DRM_ERROR("hactive pixels are not multiple of 4\n");
+
+	/* program TRANS_HTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(HTOTAL(dsi_trans),
+			   (hactive - 1) | ((htotal - 1) << 16));
+	}
+
+	/* TRANS_HSYNC register to be programmed only for video mode */
+	if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+		if (intel_dsi->video_mode_format ==
+		    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+			/* BSPEC: hsync size should be at least 16 pixels */
+			if (hsync_size < 16)
+				DRM_ERROR("hsync size < 16 pixels\n");
+		}
+
+		if (hback_porch < 16)
+			DRM_ERROR("hback porch < 16 pixels\n");
+
+		if (intel_dsi->dual_link) {
+			hsync_start /= 2;
+			hsync_end /= 2;
+		}
+
+		for_each_dsi_port(port, intel_dsi->ports) {
+			dsi_trans = dsi_port_to_transcoder(port);
+			I915_WRITE(HSYNC(dsi_trans),
+				   (hsync_start - 1) | ((hsync_end - 1) << 16));
+		}
+	}
+
+	/* program TRANS_VTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		/*
+		 * FIXME: Programming this by assuming progressive mode, since
+		 * non-interlaced info from VBT is not saved inside
+		 * struct drm_display_mode.
+		 * For interlaced mode: program required pixels minus 2.
+		 */
+		I915_WRITE(VTOTAL(dsi_trans),
+			   (vactive - 1) | ((vtotal - 1) << 16));
+	}
+
+	if (vsync_end < vsync_start || vsync_end > vtotal)
+		DRM_ERROR("Invalid vsync_end value\n");
+
+	if (vsync_start < vactive)
+		DRM_ERROR("vsync_start less than vactive\n");
+
+	/* program TRANS_VSYNC register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(VSYNC(dsi_trans),
+			   (vsync_start - 1) | ((vsync_end - 1) << 16));
+	}
+
+	/*
+	 * FIXME: It has to be programmed only for interlaced
+	 * modes. Put the check condition here once interlaced
+	 * info is available, as described above.
+	 * Program TRANS_VSYNCSHIFT register.
+	 */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+	}
+}
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	enum transcoder dsi_trans;
+	u32 tmp;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(PIPECONF(dsi_trans));
+		tmp |= PIPECONF_ENABLE;
+		I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+		/* wait for transcoder to be enabled */
+		if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+					    I965_PIPECONF_ACTIVE,
+					    I965_PIPECONF_ACTIVE, 10))
+			DRM_ERROR("DSI transcoder not enabled\n");
+	}
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	enum transcoder dsi_trans;
+	u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+	/*
+	 * escape clock count calculation:
+	 * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
+	 * UI (nsec) = (10^6)/Bitrate
+	 * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
+	 * ESCAPE_CLK_COUNT  = TIME_NS/ESC_CLK_NS
+	 */
+	divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+	mul = 8 * 1000000;
+	hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+				     divisor);
+	lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+	ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+
+		/* program hst_tx_timeout */
+		tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+		tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+		tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+		I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+
+		/* FIXME: DSI_CALIB_TO */
+
+		/* program lp_rx_host timeout */
+		tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+		tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+		tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+		I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+		/* FIXME: DSI_PWAIT_TO */
+
+		/* program turn around timeout */
+		tmp = I915_READ(DSI_TA_TO(dsi_trans));
+		tmp &= ~TA_TIMEOUT_VALUE_MASK;
+		tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+		I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+	}
+}
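
Chaining the escape-clock formulas from the comment in
gen11_dsi_setup_timeouts() shows how the mul/divisor pair arises (the trailing
* 1000 in divisor presumably reconciles intel_dsi_bitrate()'s units; this is a
reading of the code, not bspec):

/*
 *   UI (ns)          = 10^6 / Bitrate
 *   TIME_NS          = BYTE_CLK_COUNT * 8 * UI
 *                    = BYTE_CLK_COUNT * (8 * 10^6) / Bitrate
 *   ESCAPE_CLK_COUNT = TIME_NS / ESC_CLK_NS
 *                    = BYTE_CLK_COUNT * mul / (ESC_CLK_NS * Bitrate)
 *
 * with mul = 8 * 10^6 and ESC_CLK_NS = intel_dsi_tlpx_ns(); using
 * DIV_ROUND_UP() rounds the count up so a timeout never fires
 * earlier than the requested interval.
 */
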
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config)
 {
 	/* step 4a: power up all lanes of the DDI used by DSI */
 	gen11_dsi_power_up_lanes(encoder);
+
+	/* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+	gen11_dsi_config_phy_lanes_sequence(encoder);
+
+	/* step 4c: configure voltage swing and skew */
+	gen11_dsi_voltage_swing_program_seq(encoder);
+
+	/* enable DDI buffer */
+	gen11_dsi_enable_ddi_buffer(encoder);
+
+	/* setup D-PHY timings */
+	gen11_dsi_setup_dphy_timings(encoder);
+
+	/* step 4h: setup DSI protocol timeouts */
+	gen11_dsi_setup_timeouts(encoder);
+
+	/* Step (4h, 4i, 4j, 4k): Configure transcoder */
+	gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+	/* Step 4l: Gate DDI clocks */
+	gen11_dsi_gate_clocks(encoder);
 }
 
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
-		     const struct intel_crtc_state *pipe_config,
-		     const struct drm_connector_state *conn_state)
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct mipi_dsi_device *dsi;
+	enum port port;
+	enum transcoder dsi_trans;
+	u32 tmp;
+	int ret;
+
+	/* set maximum return packet size */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+
+		/*
+		 * FIXME: This uses the number of DW's currently in the payload
+		 * receive queue. This is probably not what we want here.
+		 */
+		tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+		tmp &= NUMBER_RX_PLOAD_DW_MASK;
+		/* multiply "Number Rx Payload DW" by 4 to get max value */
+		tmp = tmp * 4;
+		dsi = intel_dsi->dsi_hosts[port]->device;
+		ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+		if (ret < 0)
+			DRM_ERROR("error setting max return pkt size %d\n", tmp);
+	}
+
+	/* panel power on related mipi dsi vbt sequences */
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+	/* ensure all panel commands dispatched before enabling transcoder */
+	wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *pipe_config,
+				     const struct drm_connector_state *conn_state)
 {
 	/* step2: enable IO power */
 	gen11_dsi_enable_io_power(encoder);
 
 	/* step3: enable DSI PLL */
 	gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *pipe_config,
+				 const struct drm_connector_state *conn_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	/* step3b: map PLL to the port */
+	gen11_dsi_map_pll(encoder, pipe_config);
 
 	/* step4: enable DSI port and DPHY */
-	gen11_dsi_enable_port_and_phy(encoder);
+	gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+	/* step5: program and powerup panel */
+	gen11_dsi_powerup_panel(encoder);
+
+	/* step6c: configure transcoder timings */
+	gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+
+	/* step6d: enable dsi transcoder */
+	gen11_dsi_enable_transcoder(encoder);
+
+	/* step7: enable backlight */
+	intel_panel_enable_backlight(pipe_config, conn_state);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
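
For reference, the split between the two hooks above follows the enable-sequence step numbers in the comments (an inference from those comments, not a statement of the Bspec):

	/*
	 *   encoder->pre_pll_enable() - steps 2-3: IO power, escape clock
	 *   (the modeset core then enables the DPLL)
	 *   encoder->pre_enable()     - steps 3b-7: PLL mapping, port/PHY,
	 *                               panel power-up, transcoder timings,
	 *                               transcoder enable, backlight
	 */
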
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	enum transcoder dsi_trans;
+	u32 tmp;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+
+		/* disable transcoder */
+		tmp = I915_READ(PIPECONF(dsi_trans));
+		tmp &= ~PIPECONF_ENABLE;
+		I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+		/* wait for transcoder to be disabled */
+		if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+					    I965_PIPECONF_ACTIVE, 0, 50))
+			DRM_ERROR("DSI transcoder not disabled\n");
+	}
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+	/* ensure cmds dispatched to panel */
+	wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	enum transcoder dsi_trans;
+	u32 tmp;
+
+	/* put dsi link in ULPS */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+		tmp |= LINK_ENTER_ULPS;
+		tmp &= ~LINK_ULPS_TYPE_LP11;
+		I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+
+		if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
+				LINK_IN_ULPS),
+				10))
+			DRM_ERROR("DSI link not in ULPS\n");
+	}
+
+	/* disable ddi function */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		tmp &= ~TRANS_DDI_FUNC_ENABLE;
+		I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+	}
+
+	/* disable port sync mode if dual link */
+	if (intel_dsi->dual_link) {
+		for_each_dsi_port(port, intel_dsi->ports) {
+			dsi_trans = dsi_port_to_transcoder(port);
+			tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+			tmp &= ~PORT_SYNC_MODE_ENABLE;
+			I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+		}
+	}
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	gen11_dsi_ungate_clocks(encoder);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+		if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+				 DDI_BUF_IS_IDLE),
+				 8))
+			DRM_ERROR("DDI port:%c buffer not idle\n",
+				  port_name(port));
+	}
+	gen11_dsi_gate_clocks(encoder);
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	enum port port;
+	u32 tmp;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
+
+	if (intel_dsi->dual_link)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+
+	/* set mode to DDI */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+		tmp &= ~COMBO_PHY_MODE_DSI;
+		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+	}
+}
+
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *old_crtc_state,
+			      const struct drm_connector_state *old_conn_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	/* step1: turn off backlight */
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+	intel_panel_disable_backlight(old_conn_state);
+
+	/* step2d,e: disable transcoder and wait */
+	gen11_dsi_disable_transcoder(encoder);
+
+	/* step2f,g: powerdown panel */
+	gen11_dsi_powerdown_panel(encoder);
+
+	/* step2h,i,j: deconfigure transcoder */
+	gen11_dsi_deconfigure_transcoder(encoder);
+
+	/* step3: disable port */
+	gen11_dsi_disable_port(encoder);
+
+	/* step4: disable IO power */
+	gen11_dsi_disable_io_power(encoder);
+}
+
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 pll_id;
+
+	/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+	pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+	pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *fixed_mode =
+					intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+
+	intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+	intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+	adjusted_mode->flags = 0;
+
+	/* Dual link goes to transcoder DSI '0' */
+	if (intel_dsi->ports == BIT(PORT_B))
+		pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+	else
+		pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+	pipe_config->clock_set = true;
+	pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+	return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+				       struct intel_crtc_state *crtc_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u64 domains = 0;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports)
+		if (port == PORT_A)
+			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+		else
+			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+	return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+	enum transcoder dsi_trans;
+	bool ret = false;
+
+	if (!intel_display_power_get_if_enabled(dev_priv,
+						encoder->power_domain))
+		return false;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		case TRANS_DDI_EDP_INPUT_A_ON:
+			*pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			*pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			*pipe = PIPE_C;
+			break;
+		default:
+			DRM_ERROR("Invalid PIPE input\n");
+			goto out;
+		}
+
+		tmp = I915_READ(PIPECONF(dsi_trans));
+		ret = tmp & PIPECONF_ENABLE;
+	}
+out:
+	intel_display_power_put(dev_priv, encoder->power_domain);
+	return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+	.destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+	.late_register = intel_connector_register,
+	.early_unregister = intel_connector_unregister,
+	.destroy = intel_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_get_property = intel_digital_connector_atomic_get_property,
+	.atomic_set_property = intel_digital_connector_atomic_set_property,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *dsi)
+{
+	return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+				       const struct mipi_dsi_msg *msg)
+{
+	struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+	struct mipi_dsi_packet dsi_pkt;
+	ssize_t ret;
+	bool enable_lpdt = false;
+
+	ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+	if (ret < 0)
+		return ret;
+
+	if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+		enable_lpdt = true;
+
+	/* send packet header */
+	ret  = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+	if (ret < 0)
+		return ret;
+
+	/* only long packet contains payload */
+	if (mipi_dsi_packet_format_is_long(msg->type)) {
+		ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* TODO: add payload receive code if needed */
+
+	ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+	.attach = gen11_dsi_host_attach,
+	.detach = gen11_dsi_host_detach,
+	.transfer = gen11_dsi_host_transfer,
+};
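
These ops register the hardware as a standard MIPI DSI host, so panel commands issued through the drm_mipi_dsi core against intel_dsi->dsi_hosts[port]->device end up in gen11_dsi_host_transfer() above, which emits the packet header and, for long packets, the payload. A hedged usage sketch using the stock DCS helper:

	struct mipi_dsi_device *dsi = intel_dsi->dsi_hosts[port]->device;

	/* routed to gen11_dsi_host_transfer() by the DSI core */
	mipi_dsi_dcs_set_display_brightness(dsi, 0x80);
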
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *scan, *fixed_mode = NULL;
+	enum port port;
+
+	if (!intel_bios_is_dsi_present(dev_priv, &port))
+		return;
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return;
+
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return;
+	}
+
+	encoder = &intel_dsi->base;
+	intel_dsi->attached_connector = intel_connector;
+	connector = &intel_connector->base;
+
+	/* register DSI encoder with DRM subsystem */
+	drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+	encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+	encoder->pre_enable = gen11_dsi_pre_enable;
+	encoder->disable = gen11_dsi_disable;
+	encoder->port = port;
+	encoder->get_config = gen11_dsi_get_config;
+	encoder->compute_config = gen11_dsi_compute_config;
+	encoder->get_hw_state = gen11_dsi_get_hw_state;
+	encoder->type = INTEL_OUTPUT_DSI;
+	encoder->cloneable = 0;
+	encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+	encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+	encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+	/* register DSI connector with DRM subsystem */
+	drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+	drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	/* attach connector to encoder */
+	intel_connector_attach_encoder(intel_connector, encoder);
+
+	/* fill mode info from VBT */
+	mutex_lock(&dev->mode_config.mutex);
+	intel_dsi_vbt_get_modes(intel_dsi);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (!fixed_mode) {
+		DRM_ERROR("DSI fixed mode info missing\n");
+		goto err;
+	}
+
+	connector->display_info.width_mm = fixed_mode->width_mm;
+	connector->display_info.height_mm = fixed_mode->height_mm;
+	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+	if (dev_priv->vbt.dsi.config->dual_link)
+		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+	else
+		intel_dsi->ports = BIT(port);
+
+	intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		struct intel_dsi_host *host;
+
+		host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+		if (!host)
+			goto err;
+
+		intel_dsi->dsi_hosts[port] = host;
+	}
+
+	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	return;
+
+err:
+	drm_encoder_cleanup(&encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952b..8cb02f2 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->fifo_changed = false;
 	crtc_state->wm.need_postvbl_update = false;
 	crtc_state->fb_bits = 0;
+	crtc_state->update_planes = 0;
 
 	return &crtc_state->base;
 }
@@ -203,6 +204,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
 	drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+				      int num_scalers_need, struct intel_crtc *intel_crtc,
+				      const char *name, int idx,
+				      struct intel_plane_state *plane_state,
+				      int *scaler_id)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	int j;
+	u32 mode;
+
+	if (*scaler_id < 0) {
+		/* find a free scaler */
+		for (j = 0; j < intel_crtc->num_scalers; j++) {
+			if (scaler_state->scalers[j].in_use)
+				continue;
+
+			*scaler_id = j;
+			scaler_state->scalers[*scaler_id].in_use = 1;
+			break;
+		}
+	}
+
+	if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+		return;
+
+	/* set scaler mode */
+	if (plane_state && plane_state->base.fb &&
+	    plane_state->base.fb->format->is_yuv &&
+	    plane_state->base.fb->format->num_planes > 1) {
+		if (IS_GEN9(dev_priv) &&
+		    !IS_GEMINILAKE(dev_priv)) {
+			mode = SKL_PS_SCALER_MODE_NV12;
+		} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+			/*
+			 * On gen11+'s HDR planes we only use the scaler for
+			 * scaling. They have a dedicated chroma upsampler, so
+			 * we don't need the scaler to upsample the UV plane.
+			 */
+			mode = PS_SCALER_MODE_NORMAL;
+		} else {
+			mode = PS_SCALER_MODE_PLANAR;
+
+			if (plane_state->linked_plane)
+				mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+		}
+	} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+		mode = PS_SCALER_MODE_NORMAL;
+	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+		/*
+		 * when only 1 scaler is in use on a pipe with 2 scalers
+		 * scaler 0 operates in high quality (HQ) mode.
+		 * In this case use scaler 0 to take advantage of HQ mode
+		 */
+		scaler_state->scalers[*scaler_id].in_use = 0;
+		*scaler_id = 0;
+		scaler_state->scalers[0].in_use = 1;
+		mode = SKL_PS_SCALER_MODE_HQ;
+	} else {
+		mode = SKL_PS_SCALER_MODE_DYN;
+	}
+
+	DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+		      intel_crtc->pipe, *scaler_id, name, idx);
+	scaler_state->scalers[*scaler_id].mode = mode;
+}
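
The policy factored out above is: grab the first free scaler, but when the pipe needs only one scaler and owns two, rebind the user to scaler 0, which supports high-quality (HQ) mode on the affected platforms. A toy model of just the allocation decision (names illustrative; the real code also selects a per-platform scaler mode):

	static int pick_scaler(bool *in_use, int num_scalers, int num_needed)
	{
		int id;

		for (id = 0; id < num_scalers; id++)
			if (!in_use[id])
				break;
		if (id == num_scalers)
			return -1;	/* no free scaler */

		/* a lone user is moved to scaler 0 to get HQ mode */
		if (num_needed == 1 && num_scalers > 1)
			id = 0;

		in_use[id] = true;
		return id;
	}
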
+
 /**
  * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
  * @dev_priv: i915 device
@@ -232,7 +299,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 	struct drm_atomic_state *drm_state = crtc_state->base.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
 	int num_scalers_need;
-	int i, j;
+	int i;
 
 	num_scalers_need = hweight32(scaler_state->scaler_users);
 
@@ -304,59 +371,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			idx = plane->base.id;
 
 			/* plane on different crtc cannot be a scaler user of this crtc */
-			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
 				continue;
-			}
 
 			plane_state = intel_atomic_get_new_plane_state(intel_state,
 								       intel_plane);
 			scaler_id = &plane_state->scaler_id;
 		}
 
-		if (*scaler_id < 0) {
-			/* find a free scaler */
-			for (j = 0; j < intel_crtc->num_scalers; j++) {
-				if (!scaler_state->scalers[j].in_use) {
-					scaler_state->scalers[j].in_use = 1;
-					*scaler_id = j;
-					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-						intel_crtc->pipe, *scaler_id, name, idx);
-					break;
-				}
-			}
-		}
-
-		if (WARN_ON(*scaler_id < 0)) {
-			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
-			continue;
-		}
-
-		/* set scaler mode */
-		if ((INTEL_GEN(dev_priv) >= 9) &&
-		    plane_state && plane_state->base.fb &&
-		    plane_state->base.fb->format->format ==
-		    DRM_FORMAT_NV12) {
-			if (INTEL_GEN(dev_priv) == 9 &&
-			    !IS_GEMINILAKE(dev_priv) &&
-			    !IS_SKYLAKE(dev_priv))
-				scaler_state->scalers[*scaler_id].mode =
-					SKL_PS_SCALER_MODE_NV12;
-			else
-				scaler_state->scalers[*scaler_id].mode =
-					PS_SCALER_MODE_PLANAR;
-		} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
-			/*
-			 * when only 1 scaler is in use on either pipe A or B,
-			 * scaler 0 operates in high quality (HQ) mode.
-			 * In this case use scaler 0 to take advantage of HQ mode
-			 */
-			*scaler_id = 0;
-			scaler_state->scalers[0].in_use = 1;
-			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
-			scaler_state->scalers[1].in_use = 0;
-		} else {
-			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
-		}
+		intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+					  intel_crtc, name, idx,
+					  plane_state, scaler_id);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0..0a73e6e 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-/**
- * intel_create_plane_state - create plane state object
- * @plane: drm plane
- *
- * Allocates a fresh plane state for the given plane and sets some of
- * the state values to sensible initial values.
- *
- * Returns: A newly allocated plane state, or NULL on failure
- */
-struct intel_plane_state *
-intel_create_plane_state(struct drm_plane *plane)
+struct intel_plane *intel_plane_alloc(void)
 {
-	struct intel_plane_state *state;
+	struct intel_plane_state *plane_state;
+	struct intel_plane *plane;
 
-	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (!state)
-		return NULL;
+	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
 
-	state->base.plane = plane;
-	state->base.rotation = DRM_MODE_ROTATE_0;
+	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	if (!plane_state) {
+		kfree(plane);
+		return ERR_PTR(-ENOMEM);
+	}
 
-	return state;
+	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+	plane_state->scaler_id = -1;
+
+	return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+	intel_plane_destroy_state(&plane->base, plane->base.state);
+	kfree(plane);
 }
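
Note that intel_plane_alloc() reports failure as ERR_PTR(-ENOMEM) rather than NULL, so callers test it with IS_ERR(). A hedged usage sketch of the new pair:

	struct intel_plane *plane;

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return ERR_CAST(plane);

	/* ... fill in hooks, call drm_universal_plane_init(), etc ... */

	/* on an error path before registration succeeds: */
	intel_plane_free(plane);
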
 
 /**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int ret;
 
+	crtc_state->active_planes &= ~BIT(intel_plane->id);
+	crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+	intel_state->base.visible = false;
+
+	/* Nothing further to check if the plane is not on a CRTC in either state. */
 	if (!intel_state->base.crtc && !old_plane_state->base.crtc)
 		return 0;
 
-	intel_state->base.visible = false;
 	ret = intel_plane->check_plane(crtc_state, intel_state);
 	if (ret)
 		return ret;
@@ -128,13 +135,12 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	/* FIXME pre-g4x don't work like this */
 	if (state->visible)
 		crtc_state->active_planes |= BIT(intel_plane->id);
-	else
-		crtc_state->active_planes &= ~BIT(intel_plane->id);
 
 	if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
 		crtc_state->nv12_planes |= BIT(intel_plane->id);
-	else
-		crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
+	if (state->visible || old_plane_state->base.visible)
+		crtc_state->update_planes |= BIT(intel_plane->id);
 
 	return intel_plane_atomic_calc_changes(old_crtc_state,
 					       &crtc_state->base,
@@ -152,6 +158,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	const struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc_state *new_crtc_state;
 
+	new_plane_state->visible = false;
 	if (!crtc)
 		return 0;
 
@@ -164,29 +171,123 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 						   to_intel_plane_state(new_plane_state));
 }
 
-static void intel_plane_atomic_update(struct drm_plane *plane,
-				      struct drm_plane_state *old_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+			 unsigned int *update_mask)
 {
-	struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
-	struct intel_plane *intel_plane = to_intel_plane(plane);
-	const struct intel_plane_state *new_plane_state =
-		intel_atomic_get_new_plane_state(state, intel_plane);
-	struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane_state *plane_state;
+	struct intel_plane *plane;
+	int i;
 
-	if (new_plane_state->base.visible) {
-		const struct intel_crtc_state *new_crtc_state =
-			intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
+	if (*update_mask == 0)
+		return NULL;
 
-		trace_intel_update_plane(plane,
-					 to_intel_crtc(crtc));
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		enum plane_id plane_id = plane->id;
 
-		intel_plane->update_plane(intel_plane,
-					  new_crtc_state, new_plane_state);
-	} else {
-		trace_intel_disable_plane(plane,
-					  to_intel_crtc(crtc));
+		if (crtc->pipe != plane->pipe ||
+		    !(*update_mask & BIT(plane_id)))
+			continue;
 
-		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+						entries_y,
+						I915_MAX_PLANES, plane_id) ||
+		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+						entries_uv,
+						I915_MAX_PLANES, plane_id))
+			continue;
+
+		*update_mask &= ~BIT(plane_id);
+		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+		return plane;
+	}
+
+	/* should never happen */
+	WARN_ON(1);
+
+	return NULL;
+}
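
The helper exists because DDB allocations are reshuffled while scanout is live: a plane may only be committed once its new allocation no longer overlaps any allocation still programmed for another plane, which dictates the commit order. A minimal model of the overlap test, assuming half-open [start, end) ranges like skl_ddb_entry:

	struct ddb_span { u16 start, end; };

	static bool ddb_span_overlaps(const struct ddb_span *a,
				      const struct ddb_span *b)
	{
		/* zeroed (unused) spans never overlap anything */
		return a->start < b->end && b->start < a->end;
	}
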
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+	u32 update_mask = new_crtc_state->update_planes;
+	struct intel_plane *plane;
+
+	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+	while ((plane = skl_next_plane_to_commit(state, crtc,
+						 entries_y, entries_uv,
+						 &update_mask))) {
+		struct intel_plane_state *new_plane_state =
+			intel_atomic_get_new_plane_state(state, plane);
+
+		if (new_plane_state->base.visible) {
+			trace_intel_update_plane(&plane->base, crtc);
+			plane->update_plane(plane, new_crtc_state, new_plane_state);
+		} else if (new_plane_state->slave) {
+			struct intel_plane *master =
+				new_plane_state->linked_plane;
+
+			/*
+			 * We update the slave plane from this function because
+			 * programming it from the master plane's update_plane
+			 * callback runs into issues when the Y plane is
+			 * reassigned, disabled or used by a different plane.
+			 *
+			 * The slave plane is updated with the master plane's
+			 * plane_state.
+			 */
+			new_plane_state =
+				intel_atomic_get_new_plane_state(state, master);
+
+			trace_intel_update_plane(&plane->base, crtc);
+			plane->update_slave(plane, new_crtc_state, new_plane_state);
+		} else {
+			trace_intel_disable_plane(&plane->base, crtc);
+			plane->disable_plane(plane, new_crtc_state);
+		}
+	}
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+				struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	u32 update_mask = new_crtc_state->update_planes;
+	struct intel_plane_state *new_plane_state;
+	struct intel_plane *plane;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+		if (crtc->pipe != plane->pipe ||
+		    !(update_mask & BIT(plane->id)))
+			continue;
+
+		if (new_plane_state->base.visible) {
+			trace_intel_update_plane(&plane->base, crtc);
+			plane->update_plane(plane, new_crtc_state, new_plane_state);
+		} else {
+			trace_intel_disable_plane(&plane->base, crtc);
+			plane->disable_plane(plane, new_crtc_state);
+		}
 	}
 }
 
@@ -194,7 +295,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
 	.prepare_fb = intel_prepare_plane_fb,
 	.cleanup_fb = intel_cleanup_plane_fb,
 	.atomic_check = intel_plane_atomic_check,
-	.atomic_update = intel_plane_atomic_update,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ee3ca2d..ae55a68 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -153,32 +153,32 @@ static const struct {
 	int n;
 	int cts;
 } hdmi_aud_ncts[] = {
-	{ 44100, TMDS_296M, 4459, 234375 },
-	{ 44100, TMDS_297M, 4704, 247500 },
-	{ 48000, TMDS_296M, 5824, 281250 },
-	{ 48000, TMDS_297M, 5120, 247500 },
 	{ 32000, TMDS_296M, 5824, 421875 },
 	{ 32000, TMDS_297M, 3072, 222750 },
-	{ 88200, TMDS_296M, 8918, 234375 },
-	{ 88200, TMDS_297M, 9408, 247500 },
-	{ 96000, TMDS_296M, 11648, 281250 },
-	{ 96000, TMDS_297M, 10240, 247500 },
-	{ 176400, TMDS_296M, 17836, 234375 },
-	{ 176400, TMDS_297M, 18816, 247500 },
-	{ 192000, TMDS_296M, 23296, 281250 },
-	{ 192000, TMDS_297M, 20480, 247500 },
-	{ 44100, TMDS_593M, 8918, 937500 },
-	{ 44100, TMDS_594M, 9408, 990000 },
-	{ 48000, TMDS_593M, 5824, 562500 },
-	{ 48000, TMDS_594M, 6144, 594000 },
 	{ 32000, TMDS_593M, 5824, 843750 },
 	{ 32000, TMDS_594M, 3072, 445500 },
+	{ 44100, TMDS_296M, 4459, 234375 },
+	{ 44100, TMDS_297M, 4704, 247500 },
+	{ 44100, TMDS_593M, 8918, 937500 },
+	{ 44100, TMDS_594M, 9408, 990000 },
+	{ 88200, TMDS_296M, 8918, 234375 },
+	{ 88200, TMDS_297M, 9408, 247500 },
 	{ 88200, TMDS_593M, 17836, 937500 },
 	{ 88200, TMDS_594M, 18816, 990000 },
-	{ 96000, TMDS_593M, 11648, 562500 },
-	{ 96000, TMDS_594M, 12288, 594000 },
+	{ 176400, TMDS_296M, 17836, 234375 },
+	{ 176400, TMDS_297M, 18816, 247500 },
 	{ 176400, TMDS_593M, 35672, 937500 },
 	{ 176400, TMDS_594M, 37632, 990000 },
+	{ 48000, TMDS_296M, 5824, 281250 },
+	{ 48000, TMDS_297M, 5120, 247500 },
+	{ 48000, TMDS_593M, 5824, 562500 },
+	{ 48000, TMDS_594M, 6144, 594000 },
+	{ 96000, TMDS_296M, 11648, 281250 },
+	{ 96000, TMDS_297M, 10240, 247500 },
+	{ 96000, TMDS_593M, 11648, 562500 },
+	{ 96000, TMDS_594M, 12288, 594000 },
+	{ 192000, TMDS_296M, 23296, 281250 },
+	{ 192000, TMDS_297M, 20480, 247500 },
 	{ 192000, TMDS_593M, 23296, 562500 },
 	{ 192000, TMDS_594M, 24576, 594000 },
 };
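
Each entry satisfies the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS; the reordering above merely groups the table by sample rate. Spot-checking the { 44100, TMDS_297M, 4704, 247500 } entry: 297000000 * 4704 / 247500 = 5644800 = 128 * 44100.
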
@@ -929,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
 	if (WARN_ON(acomp->base.ops || acomp->base.dev))
 		return -EEXIST;
 
+	if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+		return -ENOMEM;
+
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->base.ops = &i915_audio_component_ops;
 	acomp->base.dev = i915_kdev;
@@ -952,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
 	acomp->base.dev = NULL;
 	dev_priv->audio_component = NULL;
 	drm_modeset_unlock_all(&dev_priv->drm);
+
+	device_link_remove(hda_kdev, i915_kdev);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494..6d3e026 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
 	dev_priv->vbt.display_clock_mode = general->display_clock_mode;
 	dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+	if (bdb->version >= 181) {
+		dev_priv->vbt.orientation = general->rotate_180 ?
+			DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+			DRM_MODE_PANEL_ORIENTATION_NORMAL;
+	} else {
+		dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+	}
 	DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
 		      dev_priv->vbt.int_tv_support,
 		      dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 
 	parse_dsi_backlight_ports(dev_priv, bdb->version, port);
 
+	/* FIXME is the 90 vs. 270 correct? */
+	switch (config->rotation) {
+	case ENABLE_ROTATION_0:
+		/*
+		 * Most (all?) VBTs claim 0 degrees despite having
+		 * an upside down panel, thus we do not trust this.
+		 */
+		dev_priv->vbt.dsi.orientation =
+			DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+		break;
+	case ENABLE_ROTATION_90:
+		dev_priv->vbt.dsi.orientation =
+			DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+		break;
+	case ENABLE_ROTATION_180:
+		dev_priv->vbt.dsi.orientation =
+			DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+		break;
+	case ENABLE_ROTATION_270:
+		dev_priv->vbt.dsi.orientation =
+			DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+		break;
+	}
+
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
@@ -1721,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+	if (!HAS_DISPLAY(dev_priv)) {
 		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
 		return;
 	}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 
 		dvo_port = child->dvo_port;
 
-		switch (dvo_port) {
-		case DVO_PORT_MIPIA:
-		case DVO_PORT_MIPIC:
+		if (dvo_port == DVO_PORT_MIPIA ||
+		    (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
+		    (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
 			if (port)
 				*port = dvo_port - DVO_PORT_MIPIA;
 			return true;
-		case DVO_PORT_MIPIB:
-		case DVO_PORT_MIPID:
+		} else if (dvo_port == DVO_PORT_MIPIB ||
+			   dvo_port == DVO_PORT_MIPIC ||
+			   dvo_port == DVO_PORT_MIPID) {
 			DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
 				      port_name(dvo_port - DVO_PORT_MIPIA));
-			break;
 		}
 	}
 
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
 
 	return false;
 }
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+				   enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum aux_ch aux_ch;
+
+	if (!info->alternate_aux_channel) {
+		aux_ch = (enum aux_ch)port;
+
+		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+			      aux_ch_name(aux_ch), port_name(port));
+		return aux_ch;
+	}
+
+	switch (info->alternate_aux_channel) {
+	case DP_AUX_A:
+		aux_ch = AUX_CH_A;
+		break;
+	case DP_AUX_B:
+		aux_ch = AUX_CH_B;
+		break;
+	case DP_AUX_C:
+		aux_ch = AUX_CH_C;
+		break;
+	case DP_AUX_D:
+		aux_ch = AUX_CH_D;
+		break;
+	case DP_AUX_E:
+		aux_ch = AUX_CH_E;
+		break;
+	case DP_AUX_F:
+		aux_ch = AUX_CH_F;
+		break;
+	default:
+		MISSING_CASE(info->alternate_aux_channel);
+		aux_ch = AUX_CH_A;
+		break;
+	}
+
+	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+		      aux_ch_name(aux_ch), port_name(port));
+
+	return aux_ch;
+}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d8..447c525 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
 
 #include "i915_drv.h"
 
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
 
 static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
 {
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 8d74276..25e3aba 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2660,39 +2660,20 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
 		fraction = 200;
 	}
 
-	rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
-	if (fraction)
-		rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
-							    fraction) - 1);
+	rawclk = CNP_RAWCLK_DIV(divider / 1000);
+	if (fraction) {
+		int numerator = 1;
+
+		rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+							   fraction) - 1);
+		if (HAS_PCH_ICP(dev_priv))
+			rawclk |= ICP_RAWCLK_NUM(numerator);
+	}
 
 	I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
 	return divider + fraction;
 }
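
A worked example of the merged path, assuming the 19.2 MHz branch pairs divider = 19000 with the visible fraction = 200: DIV = 19000 / 1000 = 19 and DEN = DIV_ROUND_CLOSEST(1 * 1000, 200) - 1 = 4, plus NUM = 1 on ICP, which presumably decodes as 19 + 1/(4 + 1) = 19.2 MHz; the function then returns 19000 + 200 = 19200 kHz for rawclk_freq.
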
 
-static int icp_rawclk(struct drm_i915_private *dev_priv)
-{
-	u32 rawclk;
-	int divider, numerator, denominator, frequency;
-
-	if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
-		frequency = 24000;
-		divider = 23;
-		numerator = 0;
-		denominator = 0;
-	} else {
-		frequency = 19200;
-		divider = 18;
-		numerator = 1;
-		denominator = 4;
-	}
-
-	rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
-		 ICP_RAWCLK_DEN(denominator);
-
-	I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
-	return frequency;
-}
-
 static int pch_rawclk(struct drm_i915_private *dev_priv)
 {
 	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@@ -2740,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
-	if (HAS_PCH_ICP(dev_priv))
-		dev_priv->rawclk_freq = icp_rawclk(dev_priv);
-	else if (HAS_PCH_CNP(dev_priv))
+	if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
 		dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
 	else if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->rawclk_freq = pch_rawclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7bea..5127da2 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
 		limited_color_range = intel_crtc_state->limited_color_range;
 
-	if (intel_crtc_state->ycbcr420) {
+	if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+	    intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
 		ilk_load_ycbcr_conversion_matrix(intel_crtc);
 		return;
 	} else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 0000000..3d0271c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
+		for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+	for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+		for_each_if(intel_port_is_combophy(__dev_priv, __port))
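
The reverse variant uses the "(__port)-- > PORT_A" idiom: the condition compares first and decrements afterwards, so the body sees I915_MAX_PORTS - 1 down to PORT_A, and the loop variable is never used after dropping below PORT_A. That matters because enum port may be unsigned, in which case a conventional "port >= PORT_A" test with PORT_A == 0 would never become false. An equivalent loop with a signed index (illustrative only):

	int p;

	for (p = I915_MAX_PORTS - 1; p >= PORT_A; p--)
		if (intel_port_is_combophy(dev_priv, (enum port)p))
			/* ... per-port init/teardown ... */;
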
+
+enum {
+	PROCMON_0_85V_DOT_0,
+	PROCMON_0_95V_DOT_0,
+	PROCMON_0_95V_DOT_1,
+	PROCMON_1_05V_DOT_0,
+	PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+	u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+	[PROCMON_0_85V_DOT_0] =
+		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+	[PROCMON_0_95V_DOT_0] =
+		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+	[PROCMON_0_95V_DOT_1] =
+		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+	[PROCMON_1_05V_DOT_0] =
+		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+	[PROCMON_1_05V_DOT_1] =
+		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, that's why we call the ICL macros even though the function has CNL
+ * on its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+	const struct cnl_procmon *procmon;
+	u32 val;
+
+	val = I915_READ(ICL_PORT_COMP_DW3(port));
+	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+	default:
+		MISSING_CASE(val);
+		/* fall through */
+	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+		break;
+	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+		break;
+	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+		break;
+	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+		break;
+	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+		break;
+	}
+
+	return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+				       enum port port)
+{
+	const struct cnl_procmon *procmon;
+	u32 val;
+
+	procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+	val = I915_READ(ICL_PORT_COMP_DW1(port));
+	val &= ~((0xff << 16) | 0xff);
+	val |= procmon->dw1;
+	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+			  enum port port, i915_reg_t reg, u32 mask,
+			  u32 expected_val)
+{
+	u32 val = I915_READ(reg);
+
+	if ((val & mask) != expected_val) {
+		DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+				 "current %08x mask %08x expected %08x\n",
+				 port_name(port),
+				 reg.reg, val, mask, expected_val);
+		return false;
+	}
+
+	return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+					  enum port port)
+{
+	const struct cnl_procmon *procmon;
+	bool ret;
+
+	procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+	ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+			    (0xff << 16) | 0xff, procmon->dw1);
+	ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+			     -1U, procmon->dw9);
+	ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+			     -1U, procmon->dw10);
+
+	return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+	return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+		(I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+	enum port port = PORT_A;
+	bool ret;
+
+	if (!cnl_combo_phy_enabled(dev_priv))
+		return false;
+
+	ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+	ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+			     CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+	return ret;
+}
+
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(CHICKEN_MISC_2);
+	val &= ~CNL_COMP_PWR_DOWN;
+	I915_WRITE(CHICKEN_MISC_2, val);
+
+	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
+	cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+	val = I915_READ(CNL_PORT_COMP_DW0);
+	val |= COMP_INIT;
+	I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+	val = I915_READ(CNL_PORT_CL1CM_DW5);
+	val |= CL_POWER_DOWN_ENABLE;
+	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	if (!cnl_combo_phy_verify_state(dev_priv))
+		DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+	val = I915_READ(CHICKEN_MISC_2);
+	val |= CNL_COMP_PWR_DOWN;
+	I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+				  enum port port)
+{
+	return !(I915_READ(ICL_PHY_MISC(port)) &
+		 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+		(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+				       enum port port)
+{
+	bool ret;
+
+	if (!icl_combo_phy_enabled(dev_priv, port))
+		return false;
+
+	ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+	ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+			     CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+	return ret;
+}
+
+void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+	enum port port;
+
+	for_each_combo_port(dev_priv, port) {
+		u32 val;
+
+		if (icl_combo_phy_verify_state(dev_priv, port)) {
+			DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+					 port_name(port));
+			continue;
+		}
+
+		val = I915_READ(ICL_PHY_MISC(port));
+		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+		I915_WRITE(ICL_PHY_MISC(port), val);
+
+		cnl_set_procmon_ref_values(dev_priv, port);
+
+		val = I915_READ(ICL_PORT_COMP_DW0(port));
+		val |= COMP_INIT;
+		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+		val = I915_READ(ICL_PORT_CL_DW5(port));
+		val |= CL_POWER_DOWN_ENABLE;
+		I915_WRITE(ICL_PORT_CL_DW5(port), val);
+	}
+}
+
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+	enum port port;
+
+	for_each_combo_port_reverse(dev_priv, port) {
+		u32 val;
+
+		if (!icl_combo_phy_verify_state(dev_priv, port))
+			DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+				 port_name(port));
+
+		val = I915_READ(ICL_PHY_MISC(port));
+		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+		I915_WRITE(ICL_PHY_MISC(port), val);
+
+		val = I915_READ(ICL_PORT_COMP_DW0(port));
+		val &= ~COMP_INIT;
+		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
new file mode 100644
index 0000000..18e370f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+int intel_connector_init(struct intel_connector *connector)
+{
+	struct intel_digital_connector_state *conn_state;
+
+	/*
+	 * Allocate enough memory to hold intel_digital_connector_state.
+	 * This might be a few bytes too many, but for connectors that don't
+	 * need it we'll free the state and allocate a smaller one on the
+	 * first successful commit anyway.
+	 */
+	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+	if (!conn_state)
+		return -ENOMEM;
+
+	__drm_atomic_helper_connector_reset(&connector->base,
+					    &conn_state->base);
+
+	return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+	struct intel_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	if (intel_connector_init(connector) < 0) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+	kfree(to_intel_digital_connector_state(connector->base.state));
+	kfree(connector);
+}
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	kfree(intel_connector->detect_edid);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	intel_panel_fini(&intel_connector->panel);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	int ret;
+
+	ret = intel_backlight_device_register(intel_connector);
+	if (ret)
+		goto err;
+
+	if (i915_inject_load_failure()) {
+		ret = -EFAULT;
+		goto err_backlight;
+	}
+
+	return 0;
+
+err_backlight:
+	intel_backlight_device_unregister(intel_connector);
+err:
+	return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, and hence the encoder state determines the
+ * state of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	if (!connector->base.state->crtc)
+		return INVALID_PIPE;
+
+	return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+
+	return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+			struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret;
+
+	edid = drm_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
+	{ HDMI_AUDIO_OFF, "off" },
+	{ HDMI_AUDIO_AUTO, "auto" },
+	{ HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_property *prop;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, 0,
+					   "audio",
+					   force_audio_names,
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+	{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+	{ INTEL_BROADCAST_RGB_FULL, "Full" },
+	{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_property *prop;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   broadcast_rgb_names,
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+	if (!drm_mode_create_aspect_ratio_property(connector->dev))
+		drm_object_attach_property(&connector->base,
+			connector->dev->mode_config.aspect_ratio_property,
+			DRM_MODE_PICTURE_ASPECT_NONE);
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82..68f2fb8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	return true;
 }
 
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
 		return false;
 
 	pipe_config->has_pch_encoder = true;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	return true;
 }
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
 		return false;
 
 	pipe_config->has_pch_encoder = true;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	/* LPT FDI RX only supports 8bpc. */
 	if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ intel_crt_detect(struct drm_connector *connector,
 	return status;
 }
 
-static void intel_crt_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_crt_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e..a516697 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,38 @@
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
+#define GEN12_CSR_MAX_FW_SIZE		ICL_CSR_MAX_FW_SIZE
+
+#define ICL_CSR_PATH			"i915/icl_dmc_ver1_07.bin"
 #define ICL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
-
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
-#define CNL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
-#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
-
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 27)
-
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-
-#define BXT_CSR_MAX_FW_SIZE		0x3000
-#define GLK_CSR_MAX_FW_SIZE		0x4000
 #define ICL_CSR_MAX_FW_SIZE		0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
+
+#define CNL_CSR_PATH			"i915/cnl_dmc_ver1_07.bin"
+#define CNL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE		GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH			"i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE		0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
+
+#define KBL_CSR_PATH			"i915/kbl_dmc_ver1_04.bin"
+#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
+
+#define SKL_CSR_PATH			"i915/skl_dmc_ver1_27.bin"
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
+
+#define BXT_CSR_PATH			"i915/bxt_dmc_ver1_07.bin"
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+#define BXT_CSR_MAX_FW_SIZE		0x3000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
 
 struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
 	{'B', '0'}, {'B', '1'}, {'B', '2'}
 };
 
+static const struct stepping_info icl_stepping_info[] = {
+	{'A', '0'}, {'A', '1'}, {'A', '2'},
+	{'B', '0'}, {'B', '2'},
+	{'C', '0'}
+};
+
 static const struct stepping_info no_stepping_info = { '*', '*' };
 
 static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
 	const struct stepping_info *si;
 	unsigned int size;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_ICELAKE(dev_priv)) {
+		size = ARRAY_SIZE(icl_stepping_info);
+		si = icl_stepping_info;
+	} else if (IS_SKYLAKE(dev_priv)) {
 		size = ARRAY_SIZE(skl_stepping_info);
 		si = skl_stepping_info;
 	} else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	struct intel_csr *csr = &dev_priv->csr;
 	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
-	uint32_t max_fw_size = 0;
 	uint32_t i;
 	uint32_t *dmc_payload;
-	uint32_t required_version;
 
 	if (!fw)
 		return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 		return NULL;
 	}
 
-	csr->version = css_header->version;
-
-	if (csr->fw_path == i915_modparams.dmc_firmware_path) {
-		/* Bypass version check for firmware override. */
-		required_version = csr->version;
-	} else if (IS_ICELAKE(dev_priv)) {
-		required_version = ICL_CSR_VERSION_REQUIRED;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		required_version = CNL_CSR_VERSION_REQUIRED;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		required_version = GLK_CSR_VERSION_REQUIRED;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-		required_version = KBL_CSR_VERSION_REQUIRED;
-	} else if (IS_SKYLAKE(dev_priv)) {
-		required_version = SKL_CSR_VERSION_REQUIRED;
-	} else if (IS_BROXTON(dev_priv)) {
-		required_version = BXT_CSR_VERSION_REQUIRED;
-	} else {
-		MISSING_CASE(INTEL_REVID(dev_priv));
-		required_version = 0;
-	}
-
-	if (csr->version != required_version) {
+	if (csr->required_version &&
+	    css_header->version != csr->required_version) {
 		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
 			 " please use v%u.%u\n",
-			 CSR_VERSION_MAJOR(csr->version),
-			 CSR_VERSION_MINOR(csr->version),
-			 CSR_VERSION_MAJOR(required_version),
-			 CSR_VERSION_MINOR(required_version));
+			 CSR_VERSION_MAJOR(css_header->version),
+			 CSR_VERSION_MINOR(css_header->version),
+			 CSR_VERSION_MAJOR(csr->required_version),
+			 CSR_VERSION_MINOR(csr->required_version));
 		return NULL;
 	}
 
+	csr->version = css_header->version;
+
 	readcount += sizeof(struct intel_css_header);
 
 	/* Extract Package Header information */
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	/* fw_size is in dwords, so multiply by 4 to convert into bytes. */
 	nbytes = dmc_header->fw_size * 4;
-	if (INTEL_GEN(dev_priv) >= 11)
-		max_fw_size = ICL_CSR_MAX_FW_SIZE;
-	else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-		max_fw_size = GLK_CSR_MAX_FW_SIZE;
-	else if (IS_GEN9(dev_priv))
-		max_fw_size = BXT_CSR_MAX_FW_SIZE;
-	else
-		MISSING_CASE(INTEL_REVID(dev_priv));
-	if (nbytes > max_fw_size) {
+	if (nbytes > csr->max_fw_size) {
 		DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
 		return NULL;
 	}
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (i915_modparams.dmc_firmware_path)
-		csr->fw_path = i915_modparams.dmc_firmware_path;
-	else if (IS_ICELAKE(dev_priv))
-		csr->fw_path = I915_CSR_ICL;
-	else if (IS_CANNONLAKE(dev_priv))
-		csr->fw_path = I915_CSR_CNL;
-	else if (IS_GEMINILAKE(dev_priv))
-		csr->fw_path = I915_CSR_GLK;
-	else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-		csr->fw_path = I915_CSR_KBL;
-	else if (IS_SKYLAKE(dev_priv))
-		csr->fw_path = I915_CSR_SKL;
-	else if (IS_BROXTON(dev_priv))
-		csr->fw_path = I915_CSR_BXT;
-
 	/*
-	 * Obtain a runtime pm reference, until CSR is loaded,
-	 * to avoid entering runtime-suspend.
+	 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+	 * runtime-suspend.
+	 *
+	 * On error, we return with the rpm wakeref held to prevent runtime
+	 * suspend as runtime suspend *requires* a working CSR for whatever
+	 * reason.
 	 */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+	if (INTEL_GEN(dev_priv) >= 12) {
+		/* Allow loading fw via parameter using the last known size */
+		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+	} else if (IS_ICELAKE(dev_priv)) {
+		csr->fw_path = ICL_CSR_PATH;
+		csr->required_version = ICL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+	} else if (IS_CANNONLAKE(dev_priv)) {
+		csr->fw_path = CNL_CSR_PATH;
+		csr->required_version = CNL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+	} else if (IS_GEMINILAKE(dev_priv)) {
+		csr->fw_path = GLK_CSR_PATH;
+		csr->required_version = GLK_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+		csr->fw_path = KBL_CSR_PATH;
+		csr->required_version = KBL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		csr->fw_path = SKL_CSR_PATH;
+		csr->required_version = SKL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+	} else if (IS_BROXTON(dev_priv)) {
+		csr->fw_path = BXT_CSR_PATH;
+		csr->required_version = BXT_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+	}
+
+	if (i915_modparams.dmc_firmware_path) {
+		if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+			csr->fw_path = NULL;
+			DRM_INFO("Disabling CSR firmware and runtime PM\n");
+			return;
+		}
+
+		csr->fw_path = i915_modparams.dmc_firmware_path;
+		/* Bypass version check for firmware override. */
+		csr->required_version = 0;
+	}
+
 	if (csr->fw_path == NULL) {
 		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
 		WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
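
The net effect of the intel_csr.c rework: platform knowledge moves out of parse_csr_fw() and into intel_csr_ucode_init(), which now pins down three things per platform that the parser then consumes blindly. A hedged sketch of that per-platform contract (field names follow struct intel_csr as used in the hunks above):

	struct csr_fw_desc {
		const char *fw_path;	/* blob to request, NULL = no firmware */
		u32 required_version;	/* exact version to accept, 0 = any
					 * (the module-parameter override path) */
		u32 max_fw_size;	/* parse_csr_fw() rejects bigger payloads */
	};
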
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7..f3e1d6a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
 #include <drm/drm_scdc_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
+#include "intel_dsi.h"
 
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
@@ -642,7 +643,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (IS_KBL_ULX(dev_priv)) {
+	if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
 		return kbl_y_ddi_translations_dp;
 	} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +659,7 @@ static const struct ddi_buf_trans *
 skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
 	if (dev_priv->vbt.edp.low_vswing) {
-		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
 			return skl_y_ddi_translations_edp;
 		} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +681,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
 		return skl_y_ddi_translations_hdmi;
 	} else {
@@ -1060,10 +1061,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 }
 
 static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
-				       const struct intel_shared_dpll *pll)
+				       const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	int clock = crtc->config->port_clock;
+	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+	int clock = crtc_state->port_clock;
 	const enum intel_dpll_id id = pll->info->id;
 
 	switch (id) {
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return dco_freq / (p0 * p1 * p2 * 5);
 }
 
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			       enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			enum intel_dpll_id pll_id)
 {
 	uint32_t cfgcr0, cfgcr1;
 	uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1517,7 +1518,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	else
 		dotclock = pipe_config->port_clock;
 
-	if (pipe_config->ycbcr420)
+	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 		dotclock *= 2;
 
 	if (pipe_config->pixel_multiplier)
@@ -1737,16 +1738,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (INTEL_GEN(dev_priv) <= 8)
-		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_BC(dev_priv))
-		skl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_LP(dev_priv))
-		bxt_ddi_clock_get(encoder, pipe_config);
+	if (IS_ICELAKE(dev_priv))
+		icl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_ICELAKE(dev_priv))
-		icl_ddi_clock_get(encoder, pipe_config);
+	else if (IS_GEN9_LP(dev_priv))
+		bxt_ddi_clock_get(encoder, pipe_config);
+	else if (IS_GEN9_BC(dev_priv))
+		skl_ddi_clock_get(encoder, pipe_config);
+	else if (INTEL_GEN(dev_priv) <= 8)
+		hsw_ddi_clock_get(encoder, pipe_config);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1785,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
 		break;
 	}
 
+	/*
+	 * As per DP 1.2 spec section 2.3.4.3, while sending
+	 * YCBCR 444 signals we should program the MSA MISC1/0 fields with
+	 * colorspace information. The output colorspace encoding is BT601.
+	 */
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+		temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
 	I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
 }
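
Two of the hunks above key off the new output_format field. The dotclock one encodes that a YCbCr 4:2:0 stream carries two pixels per port clock (chroma is shared between pixel pairs), so the mode's dotclock is recovered as twice the port clock; the MSA one tags YCbCr 4:4:4 streams as the DP spec requires. A worked example of the dotclock case, with hypothetical numbers:

	/* A 4:2:0 mode whose DDI runs at 297 MHz transports two pixels per
	 * clock, so the readout reports a 594 MHz dotclock.
	 */
	int port_clock = 297000;	/* kHz, from the DPLL readout */
	int dotclock = port_clock * 2;	/* 594000 kHz for 4:2:0 */
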
 
@@ -1998,24 +2006,24 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	return ret;
 }
 
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-			    enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+					u8 *pipe_mask, bool *is_dp_mst)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = encoder->port;
 	enum pipe p;
 	u32 tmp;
-	bool ret;
+	u8 mst_pipe_mask;
+
+	*pipe_mask = 0;
+	*is_dp_mst = false;
 
 	if (!intel_display_power_get_if_enabled(dev_priv,
 						encoder->power_domain))
-		return false;
-
-	ret = false;
+		return;
 
 	tmp = I915_READ(DDI_BUF_CTL(port));
-
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
 		goto out;
 
@@ -2023,44 +2031,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		default:
+			MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+			/* fallthrough */
 		case TRANS_DDI_EDP_INPUT_A_ON:
 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
-			*pipe = PIPE_A;
+			*pipe_mask = BIT(PIPE_A);
 			break;
 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
-			*pipe = PIPE_B;
+			*pipe_mask = BIT(PIPE_B);
 			break;
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
-			*pipe = PIPE_C;
+			*pipe_mask = BIT(PIPE_C);
 			break;
 		}
 
-		ret = true;
-
 		goto out;
 	}
 
+	mst_pipe_mask = 0;
 	for_each_pipe(dev_priv, p) {
-		enum transcoder cpu_transcoder = (enum transcoder) p;
+		enum transcoder cpu_transcoder = (enum transcoder)p;
 
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
-		if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
-			if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
-			    TRANS_DDI_MODE_SELECT_DP_MST)
-				goto out;
+		if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+			continue;
 
-			*pipe = p;
-			ret = true;
+		if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+		    TRANS_DDI_MODE_SELECT_DP_MST)
+			mst_pipe_mask |= BIT(p);
 
-			goto out;
-		}
+		*pipe_mask |= BIT(p);
 	}
 
-	DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+	if (!*pipe_mask)
+		DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+			      port_name(port));
+
+	if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+		DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+			      port_name(port), *pipe_mask);
+		*pipe_mask = BIT(ffs(*pipe_mask) - 1);
+	}
+
+	if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+		DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+			      port_name(port), *pipe_mask, mst_pipe_mask);
+	else
+		*is_dp_mst = mst_pipe_mask;
 
 out:
-	if (ret && IS_GEN9_LP(dev_priv)) {
+	if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
 		tmp = I915_READ(BXT_PHY_CTL(port));
 		if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
 			    BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2092,26 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	}
 
 	intel_display_power_put(dev_priv, encoder->power_domain);
+}
 
-	return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
+{
+	u8 pipe_mask;
+	bool is_mst;
+
+	intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+	if (is_mst || !pipe_mask)
+		return false;
+
+	*pipe = ffs(pipe_mask) - 1;
+
+	return true;
 }
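
The readout now returns a pipe *mask*; the single-pipe wrapper above recovers the lowest claimed pipe with ffs(), and the helper flags multi-pipe claims with hweight8(). A standalone demo of the same bit arithmetic (values invented, __builtin_popcount() standing in for hweight8()):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int pipe_mask = (1u << 1) | (1u << 2);	/* pipes B and C */

		if (__builtin_popcount(pipe_mask) > 1)
			printf("conflict: %d pipes claimed\n",
			       __builtin_popcount(pipe_mask));

		printf("lowest pipe: %d\n", ffs(pipe_mask) - 1);	/* 1, i.e. pipe B */
		return 0;
	}
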
 
 static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
 	/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
 	 * DC states enabled at the same time, while for driver initiated AUX
@@ -2089,13 +2125,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
 	 * Note that PSR is enabled only on Port A even though this function
 	 * returns the correct domain for other ports too.
 	 */
-	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
-					      intel_dp->aux_power_domain;
+	return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+					      intel_aux_power_domain(dig_port);
 }
 
 static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 				       struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port;
 	u64 domains;
 
@@ -2110,12 +2147,19 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	dig_port = enc_to_dig_port(&encoder->base);
 	domains = BIT_ULL(dig_port->ddi_io_power_domain);
 
-	/* AUX power is only needed for (e)DP mode, not for HDMI. */
-	if (intel_crtc_has_dp_encoder(crtc_state)) {
-		struct intel_dp *intel_dp = &dig_port->dp;
+	/*
+	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+	 * ports.
+	 */
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
 
-		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
-	}
+	/*
+	 * VDSC power is needed when DSC is enabled
+	 */
+	if (crtc_state->dsc_params.compression_enable)
+		domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
 
 	return domains;
 }
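
Domains accumulate in a u64 because the power domain enum has outgrown 32 entries; BIT_ULL() keeps the shift in 64-bit arithmetic, where a plain 1 << domain is undefined once domain reaches 32. Standalone illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n) (1ULL << (n))

	int main(void)
	{
		uint64_t domains = 0;

		domains |= BIT_ULL(40);	/* 64-bit shift, well defined */
		/* (1 << 40) would overflow a 32-bit int: undefined behaviour */
		printf("mask: %#llx\n", (unsigned long long)domains);
		return 0;
	}
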
@@ -2748,77 +2792,130 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
-			   struct intel_crtc_state *crtc_state,
-			   struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct drm_connector_state *conn_state;
-	struct drm_connector *conn;
-	int i;
+	enum port port = encoder->port;
+	u32 val;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-		struct intel_encoder *encoder =
-			to_intel_encoder(conn_state->best_encoder);
-		enum port port;
-		uint32_t val;
+	mutex_lock(&dev_priv->dpll_lock);
 
-		if (conn_state->crtc != crtc)
-			continue;
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
 
-		port = encoder->port;
-		mutex_lock(&dev_priv->dpll_lock);
-
-		val = I915_READ(DPCLKA_CFGCR0_ICL);
-		WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
-
-		if (intel_port_is_combophy(dev_priv, port)) {
-			val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
-			val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
-			I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-			POSTING_READ(DPCLKA_CFGCR0_ICL);
-		}
-
-		val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	if (intel_port_is_combophy(dev_priv, port)) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-
-		mutex_unlock(&dev_priv->dpll_lock);
+		POSTING_READ(DPCLKA_CFGCR0_ICL);
 	}
+
+	val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+	mutex_unlock(&dev_priv->dpll_lock);
 }
 
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
-			     struct intel_crtc_state *crtc_state,
-			     struct drm_atomic_state *old_state)
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct drm_connector_state *old_conn_state;
-	struct drm_connector *conn;
-	int i;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
 
-	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
-		struct intel_encoder *encoder =
-			to_intel_encoder(old_conn_state->best_encoder);
-		enum port port;
+	mutex_lock(&dev_priv->dpll_lock);
 
-		if (old_conn_state->crtc != crtc)
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	u32 val;
+	enum port port;
+	u32 port_mask;
+	bool ddi_clk_needed;
+
+	/*
+	 * In case of DP MST, we sanitize the primary encoder only, not the
+	 * virtual ones.
+	 */
+	if (encoder->type == INTEL_OUTPUT_DP_MST)
+		return;
+
+	if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+		u8 pipe_mask;
+		bool is_mst;
+
+		intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+		/*
+		 * In the unlikely case that BIOS enables DP in MST mode, just
+		 * warn since our MST HW readout is incomplete.
+		 */
+		if (WARN_ON(is_mst))
+			return;
+	}
+
+	port_mask = BIT(encoder->port);
+	ddi_clk_needed = encoder->base.crtc;
+
+	if (encoder->type == INTEL_OUTPUT_DSI) {
+		struct intel_encoder *other_encoder;
+
+		port_mask = intel_dsi_encoder_ports(encoder);
+		/*
+		 * Sanity check that we haven't incorrectly registered another
+		 * encoder using any of the ports of this DSI encoder.
+		 */
+		for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+			if (other_encoder == encoder)
+				continue;
+
+			if (WARN_ON(port_mask & BIT(other_encoder->port)))
+				return;
+		}
+		/*
+		 * DSI ports should have their DDI clock ungated when disabled
+		 * and gated when enabled.
+		 */
+		ddi_clk_needed = !encoder->base.crtc;
+	}
+
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_port_masked(port, port_mask) {
+		bool ddi_clk_ungated = !(val &
+					 icl_dpclka_cfgcr0_clk_off(dev_priv,
+								   port));
+
+		if (ddi_clk_needed == ddi_clk_ungated)
 			continue;
 
-		port = encoder->port;
-		mutex_lock(&dev_priv->dpll_lock);
-		I915_WRITE(DPCLKA_CFGCR0_ICL,
-			   I915_READ(DPCLKA_CFGCR0_ICL) |
-			   icl_dpclka_cfgcr0_clk_off(dev_priv, port));
-		mutex_unlock(&dev_priv->dpll_lock);
+		/*
+		 * For now, punt on the case where the clock is gated but the
+		 * port would need it; something else is really broken then.
+		 */
+		if (WARN_ON(ddi_clk_needed))
+			continue;
+
+		DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+			 port_name(port));
+		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 	}
 }
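
The sanitize loop only ever acts in one of the four possible states; a hedged summary of its decision table:

	/*
	 *   clock needed  clock ungated  action
	 *   ------------  -------------  --------------------------------
	 *   yes           yes            nothing to do (continue)
	 *   no            no             nothing to do (continue)
	 *   yes           no             WARN and punt - can't fix it here
	 *   no            yes            gate the clock (the DRM_NOTE path)
	 */
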
 
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
-				 const struct intel_shared_dpll *pll)
+				 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	uint32_t val;
+	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
 	if (WARN_ON(!pll))
 		return;
@@ -2828,7 +2925,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	if (IS_ICELAKE(dev_priv)) {
 		if (!intel_port_is_combophy(dev_priv, port))
 			I915_WRITE(DDI_CLK_SEL(port),
-				   icl_pll_to_ddi_pll_sel(encoder, pll));
+				   icl_pll_to_ddi_pll_sel(encoder, crtc_state));
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
 		val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2978,184 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	}
 }
 
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum port port = dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+	u32 val;
+	int i;
+
+	if (tc_port == PORT_TC_NONE)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+		val = I915_READ(mg_regs[i]);
+		val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+		       MG_DP_MODE_CFG_TRPWR_GATING |
+		       MG_DP_MODE_CFG_CLNPWR_GATING |
+		       MG_DP_MODE_CFG_DIGPWR_GATING |
+		       MG_DP_MODE_CFG_GAONPWR_GATING;
+		I915_WRITE(mg_regs[i], val);
+	}
+
+	val = I915_READ(MG_MISC_SUS0(tc_port));
+	val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+	       MG_MISC_SUS0_CFG_TR2PWR_GATING |
+	       MG_MISC_SUS0_CFG_CL2PWR_GATING |
+	       MG_MISC_SUS0_CFG_GAONPWR_GATING |
+	       MG_MISC_SUS0_CFG_TRPWR_GATING |
+	       MG_MISC_SUS0_CFG_CL1PWR_GATING |
+	       MG_MISC_SUS0_CFG_DGPWR_GATING;
+	I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum port port = dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+	u32 val;
+	int i;
+
+	if (tc_port == PORT_TC_NONE)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+		val = I915_READ(mg_regs[i]);
+		val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+			 MG_DP_MODE_CFG_TRPWR_GATING |
+			 MG_DP_MODE_CFG_CLNPWR_GATING |
+			 MG_DP_MODE_CFG_DIGPWR_GATING |
+			 MG_DP_MODE_CFG_GAONPWR_GATING);
+		I915_WRITE(mg_regs[i], val);
+	}
+
+	val = I915_READ(MG_MISC_SUS0(tc_port));
+	val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+		 MG_MISC_SUS0_CFG_TR2PWR_GATING |
+		 MG_MISC_SUS0_CFG_CL2PWR_GATING |
+		 MG_MISC_SUS0_CFG_GAONPWR_GATING |
+		 MG_MISC_SUS0_CFG_TRPWR_GATING |
+		 MG_MISC_SUS0_CFG_CL1PWR_GATING |
+		 MG_MISC_SUS0_CFG_DGPWR_GATING);
+	I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum port port = intel_dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	u32 ln0, ln1, lane_info;
+
+	if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+		return;
+
+	ln0 = I915_READ(MG_DP_MODE(port, 0));
+	ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+	switch (intel_dig_port->tc_type) {
+	case TC_PORT_TYPEC:
+		ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+		ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+			    DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+		switch (lane_info) {
+		case 0x1:
+		case 0x4:
+			break;
+		case 0x2:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+			break;
+		case 0x3:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		case 0x8:
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+			break;
+		case 0xC:
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		case 0xF:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		default:
+			MISSING_CASE(lane_info);
+		}
+		break;
+
+	case TC_PORT_LEGACY:
+		ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+		ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+		break;
+
+	default:
+		MISSING_CASE(intel_dig_port->tc_type);
+		return;
+	}
+
+	I915_WRITE(MG_DP_MODE(port, 0), ln0);
+	I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
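
The lane_info switch is easier to audit as a table. The FIA register reports one bit per Type-C lane; reading the cases above, lanes 0-1 map onto MG lane 0 and lanes 2-3 onto MG lane 1, and each legal assignment picks the x1/x2 mode bits accordingly (hedged summary - the single-lane 0x1/0x4 cases leave both registers with the mode bits cleared):

	/*
	 *   lane_info  DP lanes    ln0 mode bits  ln1 mode bits
	 *   ---------  ----------  -------------  -------------
	 *   0x1, 0x4   one lane    (cleared)      (cleared)
	 *   0x2        lane 1      X1             -
	 *   0x3        lanes 0-1   X1 | X2        -
	 *   0x8        lane 3      -              X1
	 *   0xC        lanes 2-3   -              X1 | X2
	 *   0xF        all four    X1 | X2        X1 | X2
	 */
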
+
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+					const struct intel_crtc_state *crtc_state)
+{
+	if (!crtc_state->fec_enable)
+		return;
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+		DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
+
+	if (!crtc_state->fec_enable)
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val |= DP_TP_CTL_FEC_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+				    DP_TP_STATUS_FEC_ENABLE_LIVE,
+				    DP_TP_STATUS_FEC_ENABLE_LIVE,
+				    1))
+		DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+					const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
+
+	if (!crtc_state->fec_enable)
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~DP_TP_CTL_FEC_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+	POSTING_READ(DP_TP_CTL(port));
+}
+
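Note the asymmetry above: enable sets DP_TP_CTL_FEC_ENABLE and then polls DP_TP_STATUS for FEC_ENABLE_LIVE with a 1 ms budget, while disable just clears the bit and flushes with a posting read. The general mask/value/timeout shape of such a poll, as a standalone sketch (the real intel_wait_for_register() adds proper sleeping and timekeeping):

	#include <stdbool.h>
	#include <stdint.h>

	static bool wait_for_bits(volatile uint32_t *reg, uint32_t mask,
				  uint32_t value, unsigned int tries)
	{
		while (tries--) {
			if ((*reg & mask) == value)
				return true;	/* condition met */
			/* the real helper sleeps between reads */
		}
		return false;			/* timed out */
	}

	int main(void)
	{
		uint32_t status = 0x1;	/* pretend the live bit is already set */

		return wait_for_bits(&status, 0x1, 0x1, 10) ? 0 : 1;
	}
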
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *crtc_state,
 				    const struct drm_connector_state *conn_state)
@@ -2894,19 +3169,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
 
-	intel_display_power_get(dev_priv,
-				intel_ddi_main_link_aux_domain(intel_dp));
-
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
 
 	intel_edp_panel_on(intel_dp);
 
-	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+	intel_ddi_clk_select(encoder, crtc_state);
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
-	icl_program_mg_dp_mode(intel_dp);
+	icl_program_mg_dp_mode(dig_port);
 	icl_disable_phy_clock_gating(dig_port);
 
 	if (IS_ICELAKE(dev_priv))
@@ -2922,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_ddi_init_dp_buf_reg(encoder);
 	if (!is_mst)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+					      true);
+	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
 
+	intel_ddi_enable_fec(encoder, crtc_state);
+
 	icl_enable_phy_clock_gating(dig_port);
 
 	if (!is_mst)
 		intel_ddi_enable_pipe_clock(crtc_state);
+
+	intel_dsc_enable(encoder, crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2944,10 +3223,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+	intel_ddi_clk_select(encoder, crtc_state);
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
+	icl_program_mg_dp_mode(dig_port);
+	icl_disable_phy_clock_gating(dig_port);
+
 	if (IS_ICELAKE(dev_priv))
 		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
 					level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3240,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	else
 		intel_prepare_hdmi_ddi_buffers(encoder, level);
 
+	icl_enable_phy_clock_gating(dig_port);
+
 	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
 	intel_ddi_enable_pipe_clock(crtc_state);
 
-	intel_dig_port->set_infoframes(&encoder->base,
+	intel_dig_port->set_infoframes(encoder,
 				       crtc_state->has_infoframe,
 				       crtc_state, conn_state);
 }
@@ -2991,15 +3275,31 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 
 	WARN_ON(crtc_state->has_pch_encoder);
 
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_map_plls_to_ports(encoder, crtc_state);
+
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
-	else
+	} else {
+		struct intel_lspcon *lspcon =
+				enc_to_intel_lspcon(&encoder->base);
+
 		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+		if (lspcon->active) {
+			struct intel_digital_port *dig_port =
+					enc_to_dig_port(&encoder->base);
+
+			dig_port->set_infoframes(encoder,
+						 crtc_state->has_infoframe,
+						 crtc_state, conn_state);
+		}
+	}
 }
 
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
@@ -3018,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
 	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
 	I915_WRITE(DP_TP_CTL(port), val);
 
+	/* Disable FEC in DP Sink */
+	intel_ddi_disable_fec_state(encoder, crtc_state);
+
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 }
@@ -3041,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	}
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 
 	intel_edp_panel_vdd_on(intel_dp);
 	intel_edp_panel_off(intel_dp);
@@ -3049,9 +3352,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
 	intel_ddi_clk_disable(encoder);
-
-	intel_display_power_put(dev_priv,
-				intel_ddi_main_link_aux_domain(intel_dp));
 }
 
 static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,12 +3362,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
-	dig_port->set_infoframes(&encoder->base, false,
+	dig_port->set_infoframes(encoder, false,
 				 old_crtc_state, old_conn_state);
 
 	intel_ddi_disable_pipe_clock(old_crtc_state);
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 
 	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
@@ -3080,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *old_crtc_state,
 				   const struct drm_connector_state *old_conn_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
 	/*
 	 * When called from DP MST code:
 	 * - old_conn_state will be NULL
@@ -3099,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 	else
 		intel_ddi_post_disable_dp(encoder,
 					  old_crtc_state, old_conn_state);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_unmap_plls_to_ports(encoder);
 }
 
 void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3118,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	val &= ~FDI_RX_ENABLE;
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
-	intel_disable_ddi_buf(encoder);
+	intel_disable_ddi_buf(encoder, old_crtc_state);
 	intel_ddi_clk_disable(encoder);
 
 	val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3154,6 +3459,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
 		intel_audio_codec_enable(encoder, crtc_state, conn_state);
 }
 
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+			       enum port port)
+{
+	static const i915_reg_t regs[] = {
+		[PORT_A] = CHICKEN_TRANS_EDP,
+		[PORT_B] = CHICKEN_TRANS_A,
+		[PORT_C] = CHICKEN_TRANS_B,
+		[PORT_D] = CHICKEN_TRANS_C,
+		[PORT_E] = CHICKEN_TRANS_A,
+	};
+
+	WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+	if (WARN_ON(port < PORT_A || port > PORT_E))
+		port = PORT_A;
+
+	return regs[port];
+}
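
Swapping the port-to-transcoder indirection for a direct, bounds-checked register table also makes the PORT_E aliasing explicit. A standalone demo of the clamped designated-initializer lookup pattern (enum values and offsets invented):

	#include <stdio.h>

	enum port { PORT_A, PORT_B, PORT_C, NUM_PORTS };

	static int reg_for_port(enum port port)
	{
		static const int regs[NUM_PORTS] = {
			[PORT_A] = 0x100,
			[PORT_B] = 0x104,
			[PORT_C] = 0x108,
		};

		if (port < PORT_A || port >= NUM_PORTS)
			port = PORT_A;	/* clamp instead of indexing out of bounds */

		return regs[port];
	}

	int main(void)
	{
		printf("%#x\n", reg_for_port(PORT_C));
		return 0;
	}
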
+
 static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
@@ -3177,17 +3502,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 		 * the bits affect a specific DDI port rather than
 		 * a specific transcoder.
 		 */
-		static const enum transcoder port_to_transcoder[] = {
-			[PORT_A] = TRANSCODER_EDP,
-			[PORT_B] = TRANSCODER_A,
-			[PORT_C] = TRANSCODER_B,
-			[PORT_D] = TRANSCODER_C,
-			[PORT_E] = TRANSCODER_A,
-		};
-		enum transcoder transcoder = port_to_transcoder[port];
+		i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
 		u32 val;
 
-		val = I915_READ(CHICKEN_TRANS(transcoder));
+		val = I915_READ(reg);
 
 		if (port == PORT_E)
 			val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3514,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 			val |= DDI_TRAINING_OVERRIDE_ENABLE |
 				DDI_TRAINING_OVERRIDE_VALUE;
 
-		I915_WRITE(CHICKEN_TRANS(transcoder), val);
-		POSTING_READ(CHICKEN_TRANS(transcoder));
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
 
 		udelay(1);
 
@@ -3208,7 +3526,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 			val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
 				 DDI_TRAINING_OVERRIDE_VALUE);
 
-		I915_WRITE(CHICKEN_TRANS(transcoder), val);
+		I915_WRITE(reg, val);
 	}
 
 	/* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -3252,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
 	intel_edp_drrs_disable(intel_dp, old_crtc_state);
 	intel_psr_disable(intel_dp, old_crtc_state);
 	intel_edp_backlight_off(old_conn_state);
+	/* Disable the decompression in DP Sink */
+	intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+					      false);
 }
 
 static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3282,13 +3603,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
 		intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
-				   const struct intel_crtc_state *pipe_config,
-				   const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+					 const struct intel_crtc_state *pipe_config,
+					 enum port port)
 {
-	uint8_t mask = pipe_config->lane_lat_optim_mask;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
 
-	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+	switch (pipe_config->lane_count) {
+	case 1:
+		val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+				       DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+		break;
+	case 2:
+		val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+				       DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+		break;
+	case 4:
+		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+		break;
+	default:
+		MISSING_CASE(pipe_config->lane_count);
+	}
+	I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
+
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state,
+			 const struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	enum port port = encoder->port;
+
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		intel_display_power_get(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
+
+	if (IS_GEN9_LP(dev_priv))
+		bxt_ddi_phy_set_lane_optim_mask(encoder,
+						crtc_state->lane_lat_optim_mask);
+
+	/*
+	 * Program the lane count for static/dynamic connections on Type-C ports.
+	 * Skip this step for TBT.
+	 */
+	if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+	    dig_port->tc_type == TC_PORT_TBT)
+		return;
+
+	intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		intel_display_power_put(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3353,10 +3737,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 					 struct intel_crtc_state *crtc_state)
 {
-	if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
-		crtc_state->min_voltage_level = 2;
-	else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+	if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 1;
+	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+		crtc_state->min_voltage_level = 2;
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3790,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_dig_port = enc_to_dig_port(&encoder->base);
 
-		if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+		if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
 			pipe_config->has_infoframe = true;
 
 		if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4151,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	bool init_hdmi, init_dp, init_lspcon = false;
+	enum pipe pipe;
 
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4190,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->compute_output_type = intel_ddi_compute_output_type;
 	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
-	if (IS_GEN9_LP(dev_priv))
-		intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+	intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+	intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
 	intel_encoder->pre_enable = intel_ddi_pre_enable;
 	intel_encoder->disable = intel_disable_ddi;
 	intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4202,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->type = INTEL_OUTPUT_DDI;
 	intel_encoder->power_domain = intel_port_to_power_domain(port);
 	intel_encoder->port = port;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
+	for_each_pipe(dev_priv, pipe)
+		intel_encoder->crtc_mask |= BIT(pipe);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4214,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
 	switch (port) {
 	case PORT_A:
@@ -3858,8 +4245,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		MISSING_CASE(port);
 	}
 
-	intel_infoframe_init(intel_dig_port);
-
 	if (init_dp) {
 		if (!intel_ddi_init_dp_connector(intel_dig_port))
 			goto err;
@@ -3888,6 +4273,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 				port_name(port));
 	}
 
+	intel_infoframe_init(intel_dig_port);
 	return;
 
 err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 01fa982..1e56319 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
 #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
 }
 
 static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -744,27 +748,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 	if (INTEL_GEN(dev_priv) >= 10) {
 		for_each_pipe(dev_priv, pipe)
 			info->num_scalers[pipe] = 2;
-	} else if (INTEL_GEN(dev_priv) == 9) {
+	} else if (IS_GEN9(dev_priv)) {
 		info->num_scalers[PIPE_A] = 2;
 		info->num_scalers[PIPE_B] = 2;
 		info->num_scalers[PIPE_C] = 1;
 	}
 
-	BUILD_BUG_ON(I915_NUM_ENGINES >
-		     sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
 
-	/*
-	 * Skylake and Broxton currently don't expose the topmost plane as its
-	 * use is exclusive with the legacy cursor and we only want to expose
-	 * one of those, not both. Until we can safely expose the topmost plane
-	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
-	 * we don't expose the topmost plane at all to prevent ABI breakage
-	 * down the line.
-	 */
-	if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+	if (IS_GEN11(dev_priv))
+		for_each_pipe(dev_priv, pipe)
+			info->num_sprites[pipe] = 6;
+	else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 3;
 	else if (IS_BROXTON(dev_priv)) {
+		/*
+		 * Skylake and Broxton currently don't expose the topmost plane as its
+		 * use is exclusive with the legacy cursor and we only want to expose
+		 * one of those, not both. Until we can safely expose the topmost plane
+		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+		 * we don't expose the topmost plane at all to prevent ABI breakage
+		 * down the line.
+		 */
+
 		info->num_sprites[PIPE_A] = 2;
 		info->num_sprites[PIPE_B] = 2;
 		info->num_sprites[PIPE_C] = 1;
@@ -779,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 	if (i915_modparams.disable_display) {
 		DRM_INFO("Display disabled (module parameter)\n");
 		info->num_pipes = 0;
-	} else if (info->num_pipes > 0 &&
+	} else if (HAS_DISPLAY(dev_priv) &&
 		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
 		   HAS_PCH_SPLIT(dev_priv)) {
 		u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -804,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 			DRM_INFO("PipeC fused off\n");
 			info->num_pipes -= 1;
 		}
-	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+	} else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
 		u32 dfsm = I915_READ(SKL_DFSM);
 		u8 disabled_mask = 0;
 		bool invalid;
@@ -844,13 +851,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 		cherryview_sseu_info_init(dev_priv);
 	else if (IS_BROADWELL(dev_priv))
 		broadwell_sseu_info_init(dev_priv);
-	else if (INTEL_GEN(dev_priv) == 9)
+	else if (IS_GEN9(dev_priv))
 		gen9_sseu_info_init(dev_priv);
-	else if (INTEL_GEN(dev_priv) == 10)
+	else if (IS_GEN10(dev_priv))
 		gen10_sseu_info_init(dev_priv);
 	else if (INTEL_GEN(dev_priv) >= 11)
 		gen11_sseu_info_init(dev_priv);
 
+	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+		DRM_INFO("Disabling ppGTT for VT-d support\n");
+		info->ppgtt = INTEL_PPGTT_NONE;
+	}
+
 	/* Initialize command stream timestamp frequency */
 	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
 }
@@ -872,40 +884,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
 void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 {
 	struct intel_device_info *info = mkwrite_device_info(dev_priv);
-	u8 vdbox_disable, vebox_disable;
 	u32 media_fuse;
-	int i;
+	unsigned int i;
 
 	if (INTEL_GEN(dev_priv) < 11)
 		return;
 
-	media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
 
-	vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-	vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-			GEN11_GT_VEBOX_DISABLE_SHIFT;
+	info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+	info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+			     GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-	DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
 	for (i = 0; i < I915_MAX_VCS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VCS(i)))
 			continue;
 
-		if (!(BIT(i) & vdbox_disable))
-			continue;
-
-		info->ring_mask &= ~ENGINE_MASK(_VCS(i));
-		DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+		if (!(BIT(i) & info->vdbox_enable)) {
+			info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+		}
 	}
 
-	DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+	DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
 	for (i = 0; i < I915_MAX_VECS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VECS(i)))
 			continue;
 
-		if (!(BIT(i) & vebox_disable))
-			continue;
-
-		info->ring_mask &= ~ENGINE_MASK(_VECS(i));
-		DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+		if (!(BIT(i) & info->vebox_enable)) {
+			info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+		}
 	}
 }
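
The media fuse register is active-low: a set bit means the engine is fused *off*. Inverting it once up front turns vdbox_enable/vebox_enable into plain enable masks, so both loops test with the same polarity. Standalone demo of the trick:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t fuse_disable = 0x2;		   /* engine 1 fused off */
		uint8_t enable = (uint8_t)~fuse_disable;   /* 0xfd */

		for (int i = 0; i < 4; i++)
			if (!(enable & (1u << i)))
				printf("engine %d fused off\n", i);
		return 0;
	}
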
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64..1caf24e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
 #ifndef _INTEL_DEVICE_INFO_H_
 #define _INTEL_DEVICE_INFO_H_
 
+#include <uapi/drm/i915_drm.h>
+
 #include "intel_display.h"
 
 struct drm_printer;
@@ -74,51 +76,58 @@ enum intel_platform {
 	INTEL_MAX_PLATFORMS
 };
 
+enum intel_ppgtt {
+	INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
+	INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
+	INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
+	INTEL_PPGTT_FULL_4LVL,
+};
+
 #define DEV_INFO_FOR_EACH_FLAG(func) \
 	func(is_mobile); \
 	func(is_lp); \
 	func(is_alpha_support); \
 	/* Keep has_* in alphabetical order */ \
 	func(has_64bit_reloc); \
-	func(has_aliasing_ppgtt); \
-	func(has_csr); \
-	func(has_ddi); \
-	func(has_dp_mst); \
 	func(has_reset_engine); \
-	func(has_fbc); \
 	func(has_fpga_dbg); \
-	func(has_full_ppgtt); \
-	func(has_full_48bit_ppgtt); \
-	func(has_gmch_display); \
 	func(has_guc); \
 	func(has_guc_ct); \
-	func(has_hotplug); \
 	func(has_l3_dpf); \
 	func(has_llc); \
 	func(has_logical_ring_contexts); \
 	func(has_logical_ring_elsq); \
 	func(has_logical_ring_preemption); \
-	func(has_overlay); \
 	func(has_pooled_eu); \
-	func(has_psr); \
 	func(has_rc6); \
 	func(has_rc6p); \
 	func(has_runtime_pm); \
 	func(has_snoop); \
 	func(has_coherent_ggtt); \
 	func(unfenced_needs_alignment); \
+	func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+	/* Keep in alphabetical order */ \
 	func(cursor_needs_physical); \
-	func(hws_needs_physical); \
+	func(has_csr); \
+	func(has_ddi); \
+	func(has_dp_mst); \
+	func(has_fbc); \
+	func(has_gmch_display); \
+	func(has_hotplug); \
+	func(has_ipc); \
+	func(has_overlay); \
+	func(has_psr); \
 	func(overlay_needs_physical); \
-	func(supports_tv); \
-	func(has_ipc);
+	func(supports_tv);
 
 #define GEN_MAX_SLICES		(6) /* CNL upper bound */
 #define GEN_MAX_SUBSLICES	(8) /* ICL upper bound */
 
 struct sseu_dev_info {
 	u8 slice_mask;
-	u8 subslice_mask[GEN_MAX_SUBSLICES];
+	u8 subslice_mask[GEN_MAX_SLICES];
 	u16 eu_total;
 	u8 eu_per_subslice;
 	u8 min_eu_in_pool;
@@ -154,6 +163,7 @@ struct intel_device_info {
 	enum intel_platform platform;
 	u32 platform_mask;
 
+	enum intel_ppgtt ppgtt;
 	unsigned int page_sizes; /* page sizes supported by the HW */
 
 	u32 display_mmio_offset;
@@ -165,12 +175,18 @@ struct intel_device_info {
 #define DEFINE_FLAG(name) u8 name:1
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
+
+	struct {
+#define DEFINE_FLAG(name) u8 name:1
+		DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+	} display;
+
 	u16 ddb_size; /* in blocks */
 
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
 	int trans_offsets[I915_MAX_TRANSCODERS];
-	int palette_offsets[I915_MAX_PIPES];
 	int cursor_offsets[I915_MAX_PIPES];
 
 	/* Slice/subslice/EU info */
@@ -178,6 +194,10 @@ struct intel_device_info {
 
 	u32 cs_timestamp_frequency_khz;
 
+	/* Enabled (not fused off) media engine bitmasks. */
+	u8 vdbox_enable;
+	u8 vebox_enable;
+
 	struct color_luts {
 		u16 degamma_lut_size;
 		u16 gamma_lut_size;
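
Both flag lists are X-macros: one list expands into bitfield declarations inside the struct (DEFINE_FLAG) and into the dump loop in intel_device_info.c (PRINT_FLAG), so moving a flag into DEV_INFO_DISPLAY_FOR_EACH_FLAG automatically relocates both its storage and its debug output. A standalone demo of the two-expansion pattern:

	#include <stdio.h>

	#define FOR_EACH_FLAG(func) \
		func(has_foo); \
		func(has_bar)

	struct info {
	#define DEFINE_FLAG(name) unsigned char name:1
		FOR_EACH_FLAG(DEFINE_FLAG);
	#undef DEFINE_FLAG
	};

	int main(void)
	{
		struct info info = { .has_foo = 1 };

	#define PRINT_FLAG(name) printf(#name ": %d\n", info.name)
		FOR_EACH_FLAG(PRINT_FLAG);
	#undef PRINT_FLAG

		return 0;
	}
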
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c9878dd..07c8618 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
  *	Eric Anholt <eric@anholt.net>
  */
 
-#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
 	DRM_FORMAT_MOD_INVALID
 };
 
-static const uint32_t skl_primary_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_YUYV,
-	DRM_FORMAT_YVYU,
-	DRM_FORMAT_UYVY,
-	DRM_FORMAT_VYUY,
-};
-
-static const uint32_t skl_pri_planar_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_YUYV,
-	DRM_FORMAT_YVYU,
-	DRM_FORMAT_UYVY,
-	DRM_FORMAT_VYUY,
-	DRM_FORMAT_NV12,
-};
-
-static const uint64_t skl_format_modifiers_noccs[] = {
-	I915_FORMAT_MOD_Yf_TILED,
-	I915_FORMAT_MOD_Y_TILED,
-	I915_FORMAT_MOD_X_TILED,
-	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_INVALID
-};
-
-static const uint64_t skl_format_modifiers_ccs[] = {
-	I915_FORMAT_MOD_Yf_TILED_CCS,
-	I915_FORMAT_MOD_Y_TILED_CCS,
-	I915_FORMAT_MOD_Yf_TILED,
-	I915_FORMAT_MOD_Y_TILED,
-	I915_FORMAT_MOD_X_TILED,
-	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_INVALID
-};
-
 /* Cursor formats */
 static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
 				  struct drm_i915_gem_object *obj,
 				  struct drm_mode_fb_cmd2 *mode_cmd);
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n,
-					 struct intel_link_m_n *m2_n2);
-static void ironlake_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipemisc(struct drm_crtc *crtc);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+					 const struct intel_link_m_n *m_n,
+					 const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
 static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 				    struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(struct intel_crtc *crtc);
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
-static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 struct drm_modeset_acquire_ctx *ctx);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
 };
 
 static void
-skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-		return;
-
-	if (enable)
-		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
-	else
-		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
-}
-
-static void
 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
 {
-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-		return;
-
 	if (enable)
 		I915_WRITE(CLKGATE_DIS_PSL(pipe),
 			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
 			"PCH LVDS enabled on transcoder %c, should be disabled\n",
 			pipe_name(pipe));
 
+	/* PCH SDVOB multiplexes with HDMIB */
 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 	}
 }
 
-static void i9xx_disable_pll(struct intel_crtc *crtc)
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
 	/* Disable DVO 2x clock on both PLLs if necessary */
 	if (IS_I830(dev_priv) &&
-	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
 	    !intel_num_dvo_pipes(dev_priv)) {
 		I915_WRITE(DPLL(PIPE_B),
 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 		     I915_READ(dpll_reg) & port_mask, expected_mask);
 }
 
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
-					   enum pipe pipe)
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
-								pipe);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	i915_reg_t reg;
 	uint32_t val, pipeconf_val;
 
 	/* Make sure PCH DPLL is enabled */
-	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
+	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 		 * here for both 8bpc and 12bpc.
 		 */
 		val &= ~PIPECONF_BPC_MASK;
-		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
+		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 			val |= PIPECONF_8BPC;
 		else
 			val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	val &= ~TRANS_INTERLACE_MASK;
 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
 		if (HAS_PCH_IBX(dev_priv) &&
-		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
 			val |= TRANS_LEGACY_INTERLACED_ILK;
 		else
 			val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 	return new_offset;
 }
 
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+	return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
 static u32 intel_adjust_aligned_offset(int *x, int *y,
 				       const struct drm_framebuffer *fb,
 				       int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
 
 	WARN_ON(new_offset > old_offset);
 
-	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+	if (!is_surface_linear(fb->modifier, color_plane)) {
 		unsigned int tile_size, tile_width, tile_height;
 		unsigned int pitch_tiles;
 
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
 					unsigned int rotation,
 					u32 alignment)
 {
-	uint64_t fb_modifier = fb->modifier;
 	unsigned int cpp = fb->format->cpp[color_plane];
 	u32 offset, offset_aligned;
 
 	if (alignment)
 		alignment--;
 
-	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
+	if (!is_surface_linear(fb->modifier, color_plane)) {
 		unsigned int tile_size, tile_width, tile_height;
 		unsigned int tile_rows, tiles, pitch_tiles;
 
@@ -2400,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
 				 int color_plane)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	unsigned int height;
 
 	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
-	    fb->offsets[color_plane] % intel_tile_size(dev_priv))
+	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+			      fb->offsets[color_plane], color_plane);
 		return -EINVAL;
+	}
+
+	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+	height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+	/* Catch potential overflows early */
+	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+			    fb->offsets[color_plane])) {
+		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+			      fb->offsets[color_plane], fb->pitches[color_plane],
+			      color_plane);
+		return -ERANGE;
+	}
 
 	*x = 0;
 	*y = 0;
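
For reference, the overflow test added above can be reproduced in plain C. This is a hedged sketch, not the i915 implementation; fb_plane_end_overflows_u32() is a hypothetical stand-in for the kernel's add_overflows_t()/mul_u32_u32() pair:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: does height * pitch + offset still fit in a u32? */
	static bool fb_plane_end_overflows_u32(uint32_t height, uint32_t pitch,
					       uint32_t offset)
	{
		/* Widen before multiplying, as mul_u32_u32() does. */
		uint64_t end = (uint64_t)height * pitch + offset;

		return end > UINT32_MAX;
	}

With a pitch of 0x20000 and an aligned height of 0x10000, height * pitch is 2^33; it would silently wrap in 32-bit arithmetic, which is exactly the case the new check rejects with -ERANGE.
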
@@ -2574,7 +2531,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 						      tile_size);
 		offset /= tile_size;
 
-		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+		if (!is_surface_linear(fb->modifier, i)) {
 			unsigned int tile_width, tile_height;
 			unsigned int pitch_tiles;
 			struct drm_rect r;
@@ -2788,10 +2745,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 		crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
 	else
 		crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-
-	DRM_DEBUG_KMS("%s active planes 0x%x\n",
-		      crtc_state->base.crtc->name,
-		      crtc_state->active_planes);
 }
 
 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2772,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	struct intel_plane_state *plane_state =
 		to_intel_plane_state(plane->base.state);
 
+	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+		      plane->base.base.id, plane->base.name,
+		      crtc->base.base.id, crtc->base.name);
+
 	intel_set_plane_visible(crtc_state, plane_state, false);
 	fixup_active_planes(crtc_state);
 
@@ -2826,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 		intel_pre_disable_primary_noatomic(&crtc->base);
 
 	trace_intel_disable_plane(&plane->base, crtc);
-	plane->disable_plane(plane, crtc);
+	plane->disable_plane(plane, crtc_state);
 }
 
 static void
@@ -3099,28 +3056,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 	return 0;
 }
 
-static int
-skl_check_nv12_surface(struct intel_plane_state *plane_state)
-{
-	/* Display WA #1106 */
-	if (plane_state->base.rotation !=
-	    (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
-	    plane_state->base.rotation != DRM_MODE_ROTATE_270)
-		return 0;
-
-	/*
-	 * src coordinates are rotated here.
-	 * We check height but report it as width
-	 */
-	if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
-		DRM_DEBUG_KMS("src width must be multiple "
-			      "of 4 for rotated NV12\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3199,9 +3134,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 	 * the main surface setup depends on it.
 	 */
 	if (fb->format->format == DRM_FORMAT_NV12) {
-		ret = skl_check_nv12_surface(plane_state);
-		if (ret)
-			return ret;
 		ret = skl_check_nv12_aux_surface(plane_state);
 		if (ret)
 			return ret;
@@ -3399,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
 	u32 linear_offset;
 	u32 dspcntr = plane_state->ctl;
-	i915_reg_t reg = DSPCNTR(i9xx_plane);
 	int x = plane_state->color_plane[0].x;
 	int y = plane_state->color_plane[0].y;
 	unsigned long irqflags;
@@ -3414,48 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
+	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
 	if (INTEL_GEN(dev_priv) < 4) {
 		/* pipesrc and dspsize control the size that is scaled from,
 		 * which should always be the user's requested size.
 		 */
+		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
 		I915_WRITE_FW(DSPSIZE(i9xx_plane),
 			      ((crtc_state->pipe_src_h - 1) << 16) |
 			      (crtc_state->pipe_src_w - 1));
-		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
 		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
 			      ((crtc_state->pipe_src_h - 1) << 16) |
 			      (crtc_state->pipe_src_w - 1));
-		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
 		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
 	}
 
-	I915_WRITE_FW(reg, dspcntr);
-
-	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		I915_WRITE_FW(DSPSURF(i9xx_plane),
-			      intel_plane_ggtt_offset(plane_state) +
-			      dspaddr_offset);
 		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
 	} else if (INTEL_GEN(dev_priv) >= 4) {
+		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+	}
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
+	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+	if (INTEL_GEN(dev_priv) >= 4)
 		I915_WRITE_FW(DSPSURF(i9xx_plane),
 			      intel_plane_ggtt_offset(plane_state) +
 			      dspaddr_offset);
-		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
-		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
-	} else {
+	else
 		I915_WRITE_FW(DSPADDR(i9xx_plane),
 			      intel_plane_ggtt_offset(plane_state) +
 			      dspaddr_offset);
-	}
-	POSTING_READ_FW(reg);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
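
The reordering above implements the rule in the new comment: ancillary registers first, the control register next, and the surface register last so its write latches the whole update. A minimal sketch of that pattern, with made-up register indices rather than real i915 MMIO offsets:

	#include <stdint.h>

	enum { STRIDE_REG, CTL_REG, SURF_REG };	/* hypothetical word offsets */

	static void plane_flip_sketch(volatile uint32_t *mmio,
				      uint32_t stride, uint32_t ctl, uint32_t surf)
	{
		mmio[STRIDE_REG] = stride;	/* staged; not yet visible */
		mmio[CTL_REG] = ctl;		/* self-arms if the plane was off */
		mmio[SURF_REG] = surf;		/* latches the whole update */
	}
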
 
 static void i9xx_disable_plane(struct intel_plane *plane,
-			       struct intel_crtc *crtc)
+			       const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3468,7 +3402,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
 		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
 	else
 		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
-	POSTING_READ_FW(DSPCNTR(i9xx_plane));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -3528,13 +3461,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc_scaler_state *scaler_state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
 	int i;
 
-	scaler_state = &intel_crtc->config->scaler_state;
-
 	/* loop through and disable scalers that aren't in use */
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
 		if (!scaler_state->scalers[i].in_use)
@@ -3542,6 +3475,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 	}
 }
 
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+					  int color_plane, unsigned int rotation)
+{
+	/*
+	 * The stride is expressed either as a number of 64-byte chunks for
+	 * linear buffers or as a number of tiles for tiled buffers.
+	 */
+	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+		return 64;
+	else if (drm_rotation_90_or_270(rotation))
+		return intel_tile_height(fb, color_plane);
+	else
+		return intel_tile_width_bytes(fb, color_plane);
+}
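
A worked example of the divisor choice, assuming an X-tile width of 512 bytes (the numbers here are illustrative, not from this patch):

	static unsigned int stride_in_chunks_example(void)
	{
		unsigned int byte_stride = 1920 * 4;	/* linear XRGB8888, 1920 px */

		/* 7680 / 64 = 120 chunks; an X-tiled buffer (512-byte-wide
		 * tiles, assumed) would instead program 7680 / 512 = 15 tiles. */
		return byte_stride / 64;
	}
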
+
 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 		     int color_plane)
 {
@@ -3552,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
 	if (color_plane >= fb->format->num_planes)
 		return 0;
 
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
-	if (drm_rotation_90_or_270(rotation))
-		stride /= intel_tile_height(fb, color_plane);
-	else
-		stride /= intel_fb_stride_alignment(fb, color_plane);
-
-	return stride;
+	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
 }
 
 static u32 skl_plane_ctl_format(uint32_t pixel_format)
@@ -3598,29 +3537,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
 	return 0;
 }
 
-/*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
-static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-	switch (pixel_format) {
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_ARGB8888:
+	if (!plane_state->base.fb->format->has_alpha)
+		return PLANE_CTL_ALPHA_DISABLE;
+
+	switch (plane_state->base.pixel_blend_mode) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+		return PLANE_CTL_ALPHA_DISABLE;
+	case DRM_MODE_BLEND_PREMULTI:
 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+	case DRM_MODE_BLEND_COVERAGE:
+		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
 	default:
+		MISSING_CASE(plane_state->base.pixel_blend_mode);
 		return PLANE_CTL_ALPHA_DISABLE;
 	}
 }
 
-static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
 {
-	switch (pixel_format) {
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_ARGB8888:
+	if (!plane_state->base.fb->format->has_alpha)
+		return PLANE_COLOR_ALPHA_DISABLE;
+
+	switch (plane_state->base.pixel_blend_mode) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+		return PLANE_COLOR_ALPHA_DISABLE;
+	case DRM_MODE_BLEND_PREMULTI:
 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+	case DRM_MODE_BLEND_COVERAGE:
+		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
 	default:
+		MISSING_CASE(plane_state->base.pixel_blend_mode);
 		return PLANE_COLOR_ALPHA_DISABLE;
 	}
 }
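
The mapping above follows the standard KMS pixel blend equations; as a hedged per-channel reference (alpha normalized to [0, 1]):

	/* DRM_MODE_BLEND_PREMULTI: the fb already stores alpha * color. */
	static float blend_premulti(float src, float alpha, float dst)
	{
		return src + (1.0f - alpha) * dst;
	}

	/* DRM_MODE_BLEND_COVERAGE: hardware multiplies by alpha itself,
	 * hence the "HW premultiply" encoding chosen above. */
	static float blend_coverage(float src, float alpha, float dst)
	{
		return alpha * src + (1.0f - alpha) * dst;
	}
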
@@ -3697,7 +3645,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 	plane_ctl = PLANE_CTL_ENABLE;
 
 	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
-		plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
+		plane_ctl |= skl_plane_ctl_alpha(plane_state);
 		plane_ctl |=
 			PLANE_CTL_PIPE_GAMMA_ENABLE |
 			PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3732,6 +3680,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv =
 		to_i915(plane_state->base.plane->dev);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	u32 plane_color_ctl = 0;
 
 	if (INTEL_GEN(dev_priv) < 11) {
@@ -3739,9 +3688,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
 	}
 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
-	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
 
-	if (fb->format->is_yuv) {
+	if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
 		else
@@ -3749,6 +3698,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 
 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+	} else if (fb->format->is_yuv) {
+		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
 	}
 
 	return plane_color_ctl;
@@ -3933,15 +3884,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
 
 	/* on skylake this is done by detaching scalers */
 	if (INTEL_GEN(dev_priv) >= 9) {
-		skl_detach_scalers(crtc);
+		skl_detach_scalers(new_crtc_state);
 
 		if (new_crtc_state->pch_pfit.enabled)
-			skylake_pfit_enable(crtc);
+			skylake_pfit_enable(new_crtc_state);
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		if (new_crtc_state->pch_pfit.enabled)
-			ironlake_pfit_enable(crtc);
+			ironlake_pfit_enable(new_crtc_state);
 		else if (old_crtc_state->pch_pfit.enabled)
-			ironlake_pfit_disable(crtc, true);
+			ironlake_pfit_disable(old_crtc_state);
 	}
 }
 
@@ -4340,10 +4291,10 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	int pipe = intel_crtc->pipe;
 	i915_reg_t reg;
 	u32 temp;
@@ -4352,7 +4303,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
-	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
@@ -4501,10 +4452,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 }
 
 /* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct intel_crtc *crtc)
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int clock = crtc->config->base.adjusted_mode.crtc_clock;
+	int clock = crtc_state->base.adjusted_mode.crtc_clock;
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
@@ -4615,12 +4567,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
 				 desired_divisor << auxdiv);
 }
 
-static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
 						enum pipe pch_transcoder)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
 		   I915_READ(HTOTAL(cpu_transcoder)));
@@ -4639,9 +4591,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
 }
 
-static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t temp;
 
 	temp = I915_READ(SOUTH_CHICKEN1);
@@ -4660,22 +4611,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
 	POSTING_READ(SOUTH_CHICKEN1);
 }
 
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = intel_crtc->base.dev;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	switch (intel_crtc->pipe) {
+	switch (crtc->pipe) {
 	case PIPE_A:
 		break;
 	case PIPE_B:
-		if (intel_crtc->config->fdi_lanes > 2)
-			cpt_set_fdi_bc_bifurcation(dev, false);
+		if (crtc_state->fdi_lanes > 2)
+			cpt_set_fdi_bc_bifurcation(dev_priv, false);
 		else
-			cpt_set_fdi_bc_bifurcation(dev, true);
+			cpt_set_fdi_bc_bifurcation(dev_priv, true);
 
 		break;
 	case PIPE_C:
-		cpt_set_fdi_bc_bifurcation(dev, true);
+		cpt_set_fdi_bc_bifurcation(dev_priv, true);
 
 		break;
 	default:
@@ -4732,7 +4684,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 	assert_pch_transcoder_disabled(dev_priv, pipe);
 
 	if (IS_IVYBRIDGE(dev_priv))
-		ivybridge_update_fdi_bc_bifurcation(crtc);
+		ivybridge_update_fdi_bc_bifurcation(crtc_state);
 
 	/* Write the TU size bits before fdi link training, so that error
 	 * detection works. */
@@ -4765,11 +4717,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 	 * Note that enable_shared_dpll tries to do the right thing, but
 	 * get_shared_dpll unconditionally resets the pll - we need that to have
 	 * the right LVDS enable sequence. */
-	intel_enable_shared_dpll(crtc);
+	intel_enable_shared_dpll(crtc_state);
 
 	/* set transcoder timing, panel must allow it */
 	assert_panel_unlocked(dev_priv, pipe);
-	ironlake_pch_transcoder_set_timings(crtc, pipe);
+	ironlake_pch_transcoder_set_timings(crtc_state, pipe);
 
 	intel_fdi_normal_train(crtc);
 
@@ -4801,7 +4753,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
 		I915_WRITE(reg, temp);
 	}
 
-	ironlake_enable_pch_transcoder(dev_priv, pipe);
+	ironlake_enable_pch_transcoder(crtc_state);
 }
 
 static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4813,10 +4765,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
 
 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
 
-	lpt_program_iclkip(crtc);
+	lpt_program_iclkip(crtc_state);
 
 	/* Set transcoder timing. */
-	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
+	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
 
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
@@ -4904,8 +4856,7 @@ static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		  unsigned int scaler_user, int *scaler_id,
 		  int src_w, int src_h, int dst_w, int dst_h,
-		  bool plane_scaler_check,
-		  uint32_t pixel_format)
+		  const struct drm_format_info *format, bool need_scaler)
 {
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
@@ -4914,21 +4865,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	int need_scaling;
 
 	/*
 	 * Src coordinates are already rotated by 270 degrees for
 	 * the 90/270 degree plane rotation cases (to match the
 	 * GTT mapping), hence no need to account for rotation here.
 	 */
-	need_scaling = src_w != dst_w || src_h != dst_h;
-
-	if (plane_scaler_check)
-		if (pixel_format == DRM_FORMAT_NV12)
-			need_scaling = true;
-
-	if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
-		need_scaling = true;
+	if (src_w != dst_w || src_h != dst_h)
+		need_scaler = true;
 
 	/*
 	 * Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4937,7 +4881,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	 * for NV12.
 	 */
 	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
-	    need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
 		return -EINVAL;
 	}
@@ -4952,7 +4896,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	 * update to free the scaler is done in plane/panel-fit programming.
 	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
 	 */
-	if (force_detach || !need_scaling) {
+	if (force_detach || !need_scaler) {
 		if (*scaler_id >= 0) {
 			scaler_state->scaler_users &= ~(1 << scaler_user);
 			scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4966,7 +4910,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		return 0;
 	}
 
-	if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+	if (format && format->format == DRM_FORMAT_NV12 &&
 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
 		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
 		return -EINVAL;
@@ -5009,12 +4953,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 int skl_update_scaler_crtc(struct intel_crtc_state *state)
 {
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+	bool need_scaler = false;
+
+	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+		need_scaler = true;
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
 				 &state->scaler_state.scaler_id,
 				 state->pipe_src_w, state->pipe_src_h,
 				 adjusted_mode->crtc_hdisplay,
-				 adjusted_mode->crtc_vdisplay, false, 0);
+				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
 }
 
 /**
@@ -5029,13 +4977,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 				   struct intel_plane_state *plane_state)
 {
-
 	struct intel_plane *intel_plane =
 		to_intel_plane(plane_state->base.plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	int ret;
-
 	bool force_detach = !fb || !plane_state->base.visible;
+	bool need_scaler = false;
+
+	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+	if (!icl_is_hdr_plane(intel_plane) &&
+	    fb && fb->format->format == DRM_FORMAT_NV12)
+		need_scaler = true;
 
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
@@ -5044,7 +4996,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 				drm_rect_height(&plane_state->base.src) >> 16,
 				drm_rect_width(&plane_state->base.dst),
 				drm_rect_height(&plane_state->base.dst),
-				fb ? true : false, fb ? fb->format->format : 0);
+				fb ? fb->format : NULL, need_scaler);
 
 	if (ret || plane_state->scaler_id < 0)
 		return ret;
@@ -5090,27 +5042,27 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
 		skl_detach_scaler(crtc, i);
 }
 
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = crtc->pipe;
-	struct intel_crtc_scaler_state *scaler_state =
-		&crtc->config->scaler_state;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	const struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
 
-	if (crtc->config->pch_pfit.enabled) {
+	if (crtc_state->pch_pfit.enabled) {
 		u16 uv_rgb_hphase, uv_rgb_vphase;
 		int pfit_w, pfit_h, hscale, vscale;
 		int id;
 
-		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
 			return;
 
-		pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
-		pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
 
-		hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
-		vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
 
 		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
 		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -5122,18 +5074,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
 		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
-		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
 	}
 }
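
The hscale/vscale values computed above are 16.16 fixed-point ratios of source size to panel-fitter window size. A small worked example (assumed sizes):

	#include <stdint.h>

	static uint32_t scale_16_16(uint32_t src, uint32_t dst)
	{
		/* e.g. (1920 << 16) / 1280 = 0x18000, i.e. 1.5 in 16.16 */
		return (src << 16) / dst;
	}
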
 
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	int pipe = crtc->pipe;
 
-	if (crtc->config->pch_pfit.enabled) {
+	if (crtc_state->pch_pfit.enabled) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
@@ -5143,8 +5095,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 						 PF_PIPE_SEL_IVB(pipe));
 		else
 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
-		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
-		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
 	}
 }
 
@@ -5339,11 +5291,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
 	if (!crtc_state->nv12_planes)
 		return false;
 
-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-		return false;
-
-	if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
-	    IS_CANNONLAKE(dev_priv))
+	/* WA Display #0827: Gen9:all */
+	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
 		return true;
 
 	return false;
@@ -5386,7 +5335,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
 	    !needs_nv12_wa(dev_priv, pipe_config)) {
 		skl_wa_clkgate(dev_priv, crtc->pipe, false);
-		skl_wa_528(dev_priv, crtc->pipe, false);
 	}
 }
 
@@ -5426,7 +5374,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
 	    needs_nv12_wa(dev_priv, pipe_config)) {
 		skl_wa_clkgate(dev_priv, crtc->pipe, true);
-		skl_wa_528(dev_priv, crtc->pipe, true);
 	}
 
 	/*
@@ -5449,7 +5396,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 *
 	 * WaCxSRDisabledForSpriteScaling:ivb
 	 */
-	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
+	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+	    old_crtc_state->base.active)
 		intel_wait_for_vblank(dev_priv, crtc->pipe);
 
 	/*
@@ -5480,24 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 		intel_update_watermarks(crtc);
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *p;
-	int pipe = intel_crtc->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	unsigned int update_mask = new_crtc_state->update_planes;
+	const struct intel_plane_state *old_plane_state;
+	struct intel_plane *plane;
+	unsigned fb_bits = 0;
+	int i;
 
-	intel_crtc_dpms_overlay_disable(intel_crtc);
+	intel_crtc_dpms_overlay_disable(crtc);
 
-	drm_for_each_plane_mask(p, dev, plane_mask)
-		to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
+	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+		if (crtc->pipe != plane->pipe ||
+		    !(update_mask & BIT(plane->id)))
+			continue;
 
-	/*
-	 * FIXME: Once we grow proper nuclear flip support out of this we need
-	 * to compute the mask of flip planes precisely. For the time being
-	 * consider this a flip to a NULL plane.
-	 */
-	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+		plane->disable_plane(plane, new_crtc_state);
+
+		if (old_plane_state->base.visible)
+			fb_bits |= plane->frontbuffer_bit;
+	}
+
+	intel_frontbuffer_flip(dev_priv, fb_bits);
 }
 
 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5555,7 +5511,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
 		if (conn_state->crtc != crtc)
 			continue;
 
-		encoder->enable(encoder, crtc_state, conn_state);
+		if (encoder->enable)
+			encoder->enable(encoder, crtc_state, conn_state);
 		intel_opregion_notify_encoder(encoder, true);
 	}
 }
@@ -5576,7 +5533,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
 			continue;
 
 		intel_opregion_notify_encoder(encoder, false);
-		encoder->disable(encoder, old_crtc_state, old_conn_state);
+		if (encoder->disable)
+			encoder->disable(encoder, old_crtc_state, old_conn_state);
 	}
 }
 
@@ -5647,37 +5605,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
-	if (intel_crtc->config->has_pch_encoder)
-		intel_prepare_shared_dpll(intel_crtc);
+	if (pipe_config->has_pch_encoder)
+		intel_prepare_shared_dpll(pipe_config);
 
-	if (intel_crtc_has_dp_encoder(intel_crtc->config))
-		intel_dp_set_m_n(intel_crtc, M1_N1);
+	if (intel_crtc_has_dp_encoder(pipe_config))
+		intel_dp_set_m_n(pipe_config, M1_N1);
 
-	intel_set_pipe_timings(intel_crtc);
-	intel_set_pipe_src_size(intel_crtc);
+	intel_set_pipe_timings(pipe_config);
+	intel_set_pipe_src_size(pipe_config);
 
-	if (intel_crtc->config->has_pch_encoder) {
-		intel_cpu_transcoder_set_m_n(intel_crtc,
-				     &intel_crtc->config->fdi_m_n, NULL);
+	if (pipe_config->has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(pipe_config,
+					     &pipe_config->fdi_m_n, NULL);
 	}
 
-	ironlake_set_pipeconf(crtc);
+	ironlake_set_pipeconf(pipe_config);
 
 	intel_crtc->active = true;
 
 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-	if (intel_crtc->config->has_pch_encoder) {
+	if (pipe_config->has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
 		 * cpu pipes, hence this is separate from all the other fdi/pch
 		 * enabling. */
-		ironlake_fdi_pll_enable(intel_crtc);
+		ironlake_fdi_pll_enable(pipe_config);
 	} else {
 		assert_fdi_tx_disabled(dev_priv, pipe);
 		assert_fdi_rx_disabled(dev_priv, pipe);
 	}
 
-	ironlake_pfit_enable(intel_crtc);
+	ironlake_pfit_enable(pipe_config);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5686,10 +5644,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_color_load_luts(&pipe_config->base);
 
 	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
+		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
 	intel_enable_pipe(pipe_config);
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (pipe_config->has_pch_encoder)
 		ironlake_pch_enable(old_intel_state, pipe_config);
 
 	assert_vblank_disabled(crtc);
@@ -5706,7 +5664,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	 * some interlaced HDMI modes. Let's do the double wait always
 	 * in case there are more corner cases we don't know about.
 	 */
-	if (intel_crtc->config->has_pch_encoder) {
+	if (pipe_config->has_pch_encoder) {
 		intel_wait_for_vblank(dev_priv, pipe);
 		intel_wait_for_vblank(dev_priv, pipe);
 	}
@@ -5740,10 +5698,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 	enum pipe pipe = crtc->pipe;
 	uint32_t val;
 
-	val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
-
-	/* Program B credit equally to all pipes */
-	val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+	val = MBUS_DBOX_A_CREDIT(2);
+	val |= MBUS_DBOX_BW_CREDIT(1);
+	val |= MBUS_DBOX_B_CREDIT(8);
 
 	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
 }
@@ -5755,7 +5712,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	struct intel_atomic_state *old_intel_state =
 		to_intel_atomic_state(old_state);
 	bool psl_clkgate_wa;
@@ -5766,37 +5723,34 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
-	if (intel_crtc->config->shared_dpll)
-		intel_enable_shared_dpll(intel_crtc);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_map_plls_to_ports(crtc, pipe_config, old_state);
+	if (pipe_config->shared_dpll)
+		intel_enable_shared_dpll(pipe_config);
 
 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-	if (intel_crtc_has_dp_encoder(intel_crtc->config))
-		intel_dp_set_m_n(intel_crtc, M1_N1);
+	if (intel_crtc_has_dp_encoder(pipe_config))
+		intel_dp_set_m_n(pipe_config, M1_N1);
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_set_pipe_timings(intel_crtc);
+		intel_set_pipe_timings(pipe_config);
 
-	intel_set_pipe_src_size(intel_crtc);
+	intel_set_pipe_src_size(pipe_config);
 
 	if (cpu_transcoder != TRANSCODER_EDP &&
 	    !transcoder_is_dsi(cpu_transcoder)) {
 		I915_WRITE(PIPE_MULT(cpu_transcoder),
-			   intel_crtc->config->pixel_multiplier - 1);
+			   pipe_config->pixel_multiplier - 1);
 	}
 
-	if (intel_crtc->config->has_pch_encoder) {
-		intel_cpu_transcoder_set_m_n(intel_crtc,
-				     &intel_crtc->config->fdi_m_n, NULL);
+	if (pipe_config->has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(pipe_config,
+					     &pipe_config->fdi_m_n, NULL);
 	}
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		haswell_set_pipeconf(crtc);
+		haswell_set_pipeconf(pipe_config);
 
-	haswell_set_pipemisc(crtc);
+	haswell_set_pipemisc(pipe_config);
 
 	intel_color_set_csc(&pipe_config->base);
 
@@ -5804,14 +5758,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
 	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-			 intel_crtc->config->pch_pfit.enabled;
+			 pipe_config->pch_pfit.enabled;
 	if (psl_clkgate_wa)
 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
 
 	if (INTEL_GEN(dev_priv) >= 9)
-		skylake_pfit_enable(intel_crtc);
+		skylake_pfit_enable(pipe_config);
 	else
-		ironlake_pfit_enable(intel_crtc);
+		ironlake_pfit_enable(pipe_config);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5844,10 +5798,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_enable_pipe(pipe_config);
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (pipe_config->has_pch_encoder)
 		lpt_pch_enable(old_intel_state, pipe_config);
 
-	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
+	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
 		intel_ddi_set_vc_payload_alloc(pipe_config, true);
 
 	assert_vblank_disabled(crtc);
@@ -5869,15 +5823,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	}
 }
 
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	/* To avoid upsetting the power well on haswell, only disable the pfit if
 	 * it's in use. The hw state code will make sure we get this right. */
-	if (force || crtc->config->pch_pfit.enabled) {
+	if (old_crtc_state->pch_pfit.enabled) {
 		I915_WRITE(PF_CTL(pipe), 0);
 		I915_WRITE(PF_WIN_POS(pipe), 0);
 		I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5908,14 +5862,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
 	intel_disable_pipe(old_crtc_state);
 
-	ironlake_pfit_disable(intel_crtc, false);
+	ironlake_pfit_disable(old_crtc_state);
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (old_crtc_state->has_pch_encoder)
 		ironlake_fdi_disable(crtc);
 
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-	if (intel_crtc->config->has_pch_encoder) {
+	if (old_crtc_state->has_pch_encoder) {
 		ironlake_disable_pch_transcoder(dev_priv, pipe);
 
 		if (HAS_PCH_CPT(dev_priv)) {
@@ -5966,24 +5920,24 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_transcoder_func(old_crtc_state);
 
+	intel_dsc_disable(old_crtc_state);
+
 	if (INTEL_GEN(dev_priv) >= 9)
 		skylake_scaler_disable(intel_crtc);
 	else
-		ironlake_pfit_disable(intel_crtc, false);
+		ironlake_pfit_disable(old_crtc_state);
 
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
+	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 }
 
-static void i9xx_pfit_enable(struct intel_crtc *crtc)
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *pipe_config = crtc->config;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (!pipe_config->gmch_pfit.control)
+	if (!crtc_state->gmch_pfit.control)
 		return;
 
 	/*
@@ -5993,8 +5947,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
-	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
 
 	/* Border color in case we don't scale up to the full screen. Black by
 	 * default, change to something else for debugging. */
@@ -6049,6 +6003,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
 	}
 }
 
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+	switch (dig_port->aux_ch) {
+	case AUX_CH_A:
+		return POWER_DOMAIN_AUX_A;
+	case AUX_CH_B:
+		return POWER_DOMAIN_AUX_B;
+	case AUX_CH_C:
+		return POWER_DOMAIN_AUX_C;
+	case AUX_CH_D:
+		return POWER_DOMAIN_AUX_D;
+	case AUX_CH_E:
+		return POWER_DOMAIN_AUX_E;
+	case AUX_CH_F:
+		return POWER_DOMAIN_AUX_F;
+	default:
+		MISSING_CASE(dig_port->aux_ch);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
@@ -6128,20 +6104,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	if (intel_crtc_has_dp_encoder(intel_crtc->config))
-		intel_dp_set_m_n(intel_crtc, M1_N1);
+	if (intel_crtc_has_dp_encoder(pipe_config))
+		intel_dp_set_m_n(pipe_config, M1_N1);
 
-	intel_set_pipe_timings(intel_crtc);
-	intel_set_pipe_src_size(intel_crtc);
+	intel_set_pipe_timings(pipe_config);
+	intel_set_pipe_src_size(pipe_config);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-		struct drm_i915_private *dev_priv = to_i915(dev);
-
 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
 		I915_WRITE(CHV_CANVAS(pipe), 0);
 	}
 
-	i9xx_set_pipeconf(intel_crtc);
+	i9xx_set_pipeconf(pipe_config);
 
 	intel_color_set_csc(&pipe_config->base);
 
@@ -6152,16 +6126,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		chv_prepare_pll(intel_crtc, intel_crtc->config);
-		chv_enable_pll(intel_crtc, intel_crtc->config);
+		chv_prepare_pll(intel_crtc, pipe_config);
+		chv_enable_pll(intel_crtc, pipe_config);
 	} else {
-		vlv_prepare_pll(intel_crtc, intel_crtc->config);
-		vlv_enable_pll(intel_crtc, intel_crtc->config);
+		vlv_prepare_pll(intel_crtc, pipe_config);
+		vlv_enable_pll(intel_crtc, pipe_config);
 	}
 
 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-	i9xx_pfit_enable(intel_crtc);
+	i9xx_pfit_enable(pipe_config);
 
 	intel_color_load_luts(&pipe_config->base);
 
@@ -6175,13 +6149,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
-static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
-	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
 }
 
 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6198,15 +6172,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	i9xx_set_pll_dividers(intel_crtc);
+	i9xx_set_pll_dividers(pipe_config);
 
-	if (intel_crtc_has_dp_encoder(intel_crtc->config))
-		intel_dp_set_m_n(intel_crtc, M1_N1);
+	if (intel_crtc_has_dp_encoder(pipe_config))
+		intel_dp_set_m_n(pipe_config, M1_N1);
 
-	intel_set_pipe_timings(intel_crtc);
-	intel_set_pipe_src_size(intel_crtc);
+	intel_set_pipe_timings(pipe_config);
+	intel_set_pipe_src_size(pipe_config);
 
-	i9xx_set_pipeconf(intel_crtc);
+	i9xx_set_pipeconf(pipe_config);
 
 	intel_crtc->active = true;
 
@@ -6217,13 +6191,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	i9xx_enable_pll(intel_crtc, pipe_config);
 
-	i9xx_pfit_enable(intel_crtc);
+	i9xx_pfit_enable(pipe_config);
 
 	intel_color_load_luts(&pipe_config->base);
 
 	if (dev_priv->display.initial_watermarks != NULL)
 		dev_priv->display.initial_watermarks(old_intel_state,
-						     intel_crtc->config);
+						     pipe_config);
 	else
 		intel_update_watermarks(intel_crtc);
 	intel_enable_pipe(pipe_config);
@@ -6234,12 +6208,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
-static void i9xx_pfit_disable(struct intel_crtc *crtc)
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (!crtc->config->gmch_pfit.control)
+	if (!old_crtc_state->gmch_pfit.control)
 		return;
 
 	assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6272,17 +6246,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
 	intel_disable_pipe(old_crtc_state);
 
-	i9xx_pfit_disable(intel_crtc);
+	i9xx_pfit_disable(old_crtc_state);
 
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
+	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev_priv))
 			chv_disable_pll(dev_priv, pipe);
 		else if (IS_VALLEYVIEW(dev_priv))
 			vlv_disable_pll(dev_priv, pipe);
 		else
-			i9xx_disable_pll(intel_crtc);
+			i9xx_disable_pll(old_crtc_state);
 	}
 
 	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6356,7 +6330,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
 
 	intel_fbc_disable(intel_crtc);
 	intel_update_watermarks(intel_crtc);
-	intel_disable_shared_dpll(intel_crtc);
+	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
 
 	domains = intel_crtc->enabled_power_domains;
 	for_each_power_domain(domain, domains)
@@ -6434,66 +6408,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
 	}
 }
 
-int intel_connector_init(struct intel_connector *connector)
-{
-	struct intel_digital_connector_state *conn_state;
-
-	/*
-	 * Allocate enough memory to hold intel_digital_connector_state.
-	 * This might be a few bytes too many, but for connectors that don't
-	 * need it we'll free the state and allocate a smaller one on the first
-	 * successful commit anyway.
-	 */
-	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
-	if (!conn_state)
-		return -ENOMEM;
-
-	__drm_atomic_helper_connector_reset(&connector->base,
-					    &conn_state->base);
-
-	return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
-	struct intel_connector *connector;
-
-	connector = kzalloc(sizeof *connector, GFP_KERNEL);
-	if (!connector)
-		return NULL;
-
-	if (intel_connector_init(connector) < 0) {
-		kfree(connector);
-		return NULL;
-	}
-
-	return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free
- */
-void intel_connector_free(struct intel_connector *connector)
-{
-	kfree(to_intel_digital_connector_state(connector->base.state));
-	kfree(connector);
-}
-
-/* Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector. */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
-	enum pipe pipe = 0;
-	struct intel_encoder *encoder = connector->encoder;
-
-	return encoder->get_hw_state(encoder, &pipe);
-}
-
 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 {
 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6604,6 +6518,9 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
 			       link_bw, &pipe_config->fdi_m_n, false);
 
 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+	if (ret == -EDEADLK)
+		return ret;
+
 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
 		pipe_config->pipe_bpp -= 2*3;
 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6760,7 +6677,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 		return -EINVAL;
 	}
 
-	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+	     pipe_config->base.ctm) {
 		/*
 		 * There is only one pipe CSC unit per pipe, and we need that
 		 * for output conversion from RGB->YCBCR. So if CTM is already
@@ -6835,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
 }
 
 void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
 		       int pixel_clock, int link_clock,
 		       struct intel_link_m_n *m_n,
 		       bool constant_n)
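
For context (a hedged reading of the DP M/N scheme, not taken from this patch): the helper derives a data M/N, the stream bandwidth over the total link bandwidth, and a link M/N, the pixel clock over the link clock. A worked example with assumed mode numbers:

	struct ratio { long long m, n; };

	/* 1920x1080@60: pixel_clock 148500 kHz, 24 bpp, 4 lanes of HBR2
	 * (per-lane link_clock 540000 kHz). */
	static struct ratio dp_data_m_n_example(void)
	{
		struct ratio r = { 24LL * 148500, 540000LL * 4 * 8 };

		/* 3564000 / 17280000 = 0.20625; the link M/N for the same
		 * mode would be 148500 / 540000 = 0.275. */
		return r;
	}
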
@@ -6926,12 +6845,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 }
 
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n)
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+					 const struct intel_link_m_n *m_n)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6939,25 +6858,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
 }
 
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n,
-					 struct intel_link_m_n *m2_n2)
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+				 enum transcoder transcoder)
 {
+	if (IS_HASWELL(dev_priv))
+		return transcoder == TRANSCODER_EDP;
+
+	/*
+	 * Strictly speaking, some registers are available before
+	 * gen7, but we only support DRRS on gen7+.
+	 */
+	return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+}
+
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+					 const struct intel_link_m_n *m_n,
+					 const struct intel_link_m_n *m2_n2)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int pipe = crtc->pipe;
-	enum transcoder transcoder = crtc->config->cpu_transcoder;
+	enum pipe pipe = crtc->pipe;
+	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
 	if (INTEL_GEN(dev_priv) >= 5) {
 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
-		 * for gen < 8) and if DRRS is supported (to make sure the
-		 * registers are not unnecessarily accessed).
+		/*
+		 * M2_N2 registers are set only if DRRS is supported
+		 * (to make sure the registers are not unnecessarily accessed).
 		 */
-		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
-		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
+		if (m2_n2 && crtc_state->has_drrs &&
+		    transcoder_has_m2_n2(dev_priv, transcoder)) {
 			I915_WRITE(PIPE_DATA_M2(transcoder),
 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6972,29 +6905,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	}
 }
 
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
 {
-	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
 
 	if (m_n == M1_N1) {
-		dp_m_n = &crtc->config->dp_m_n;
-		dp_m2_n2 = &crtc->config->dp_m2_n2;
+		dp_m_n = &crtc_state->dp_m_n;
+		dp_m2_n2 = &crtc_state->dp_m2_n2;
 	} else if (m_n == M2_N2) {
 
 		/*
 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
 		 * needs to be programmed into M1_N1.
 		 */
-		dp_m_n = &crtc->config->dp_m2_n2;
+		dp_m_n = &crtc_state->dp_m2_n2;
 	} else {
 		DRM_ERROR("Unsupported divider value\n");
 		return;
 	}
 
-	if (crtc->config->has_pch_encoder)
-		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+	if (crtc_state->has_pch_encoder)
+		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
 	else
-		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
 }
 
 static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7093,8 +7026,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
 	/* Set HBR and RBR LPF coefficients */
 	if (pipe_config->port_clock == 162000 ||
-	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
-	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
+	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x009f0003);
 	else
@@ -7121,7 +7054,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-	if (intel_crtc_has_dp_encoder(crtc->config))
+	if (intel_crtc_has_dp_encoder(pipe_config))
 		coreclk |= 0x01000000;
 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -7400,12 +7333,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 	crtc_state->dpll_hw_state.dpll = dpll;
 }
 
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	enum pipe pipe = intel_crtc->pipe;
-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 	uint32_t crtc_vtotal, crtc_vblank_end;
 	int vsyncshift = 0;
 
@@ -7419,7 +7353,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 		crtc_vtotal -= 1;
 		crtc_vblank_end -= 1;
 
-		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
 		else
 			vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7461,18 +7395,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 
 }
 
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
 	/* pipesrc controls the size that is scaled from, which should
 	 * always be the user's requested size.
 	 */
 	I915_WRITE(PIPESRC(pipe),
-		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
-		   (intel_crtc->config->pipe_src_h - 1));
+		   ((crtc_state->pipe_src_w - 1) << 16) |
+		   (crtc_state->pipe_src_h - 1));
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7548,29 +7482,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 	drm_mode_set_name(mode);
 }
 
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	uint32_t pipeconf;
 
 	pipeconf = 0;
 
 	/* we keep both pipes enabled on 830 */
 	if (IS_I830(dev_priv))
-		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
 
-	if (intel_crtc->config->double_wide)
+	if (crtc_state->double_wide)
 		pipeconf |= PIPECONF_DOUBLE_WIDE;
 
 	/* only g4x and later have fancy bpc/dither controls */
 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
 	    IS_CHERRYVIEW(dev_priv)) {
 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
-		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
 			pipeconf |= PIPECONF_DITHER_EN |
 				    PIPECONF_DITHER_TYPE_SP;
 
-		switch (intel_crtc->config->pipe_bpp) {
+		switch (crtc_state->pipe_bpp) {
 		case 18:
 			pipeconf |= PIPECONF_6BPC;
 			break;
@@ -7586,9 +7521,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		}
 	}
 
-	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		if (INTEL_GEN(dev_priv) < 4 ||
-		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 		else
 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7596,11 +7531,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		pipeconf |= PIPECONF_PROGRESSIVE;
 
 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	     intel_crtc->config->limited_color_range)
+	     crtc_state->limited_color_range)
 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
-	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
-	POSTING_READ(PIPECONF(intel_crtc->pipe));
+	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+	POSTING_READ(PIPECONF(crtc->pipe));
 }
 
 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7963,6 +7898,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+					struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+	pipe_config->lspcon_downsampling = false;
+
+	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+		if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+			bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+			bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+			if (ycbcr420_enabled) {
+				/* We support 4:2:0 in full blend mode only */
+				if (!blend)
+					output = INTEL_OUTPUT_FORMAT_INVALID;
+				else if (!(IS_GEMINILAKE(dev_priv) ||
+					   INTEL_GEN(dev_priv) >= 10))
+					output = INTEL_OUTPUT_FORMAT_INVALID;
+				else
+					output = INTEL_OUTPUT_FORMAT_YCBCR420;
+			} else {
+				/*
+				 * Currently there is no interface defined to
+				 * check user preference between RGB/YCBCR444
+				 * or YCBCR420. So the only possible case for
+				 * YCBCR444 usage is driving YCBCR420 output
+				 * with LSPCON, when pipe is configured for
+				 * YCBCR444 output and LSPCON takes care of
+				 * downsampling it.
+				 */
+				pipe_config->lspcon_downsampling = true;
+				output = INTEL_OUTPUT_FORMAT_YCBCR444;
+			}
+		}
+	}
+
+	pipe_config->output_format = output;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_state *pipe_config)
 {
@@ -7975,6 +7953,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = NULL;
 
@@ -8506,16 +8485,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
 		lpt_init_pch_refclk(dev_priv);
 }
 
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	uint32_t val;
 
 	val = 0;
 
-	switch (intel_crtc->config->pipe_bpp) {
+	switch (crtc_state->pipe_bpp) {
 	case 18:
 		val |= PIPECONF_6BPC;
 		break;
@@ -8533,32 +8512,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 		BUG();
 	}
 
-	if (intel_crtc->config->dither)
+	if (crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
 
-	if (intel_crtc->config->limited_color_range)
+	if (crtc_state->limited_color_range)
 		val |= PIPECONF_COLOR_RANGE_SELECT;
 
 	I915_WRITE(PIPECONF(pipe), val);
 	POSTING_READ(PIPECONF(pipe));
 }
 
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 val = 0;
 
-	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+	if (IS_HASWELL(dev_priv) && crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -8567,16 +8546,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 	POSTING_READ(PIPECONF(cpu_transcoder));
 }
 
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *config = intel_crtc->config;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
 		u32 val = 0;
 
-		switch (intel_crtc->config->pipe_bpp) {
+		switch (crtc_state->pipe_bpp) {
 		case 18:
 			val |= PIPEMISC_DITHER_6_BPC;
 			break;
@@ -8594,14 +8572,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
 			BUG();
 		}
 
-		if (intel_crtc->config->dither)
+		if (crtc_state->dither)
 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
-		if (config->ycbcr420) {
-			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
-				PIPEMISC_YUV420_ENABLE |
+		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+		    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+			val |= PIPEMISC_YUV420_ENABLE |
 				PIPEMISC_YUV420_MODE_FULL_BLEND;
-		}
 
 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
 	}
@@ -8812,12 +8792,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
-		 * gen < 8) and if DRRS is supported (to make sure the
-		 * registers are not unnecessarily read).
-		 */
-		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
-			crtc->config->has_drrs) {
+
+		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
 			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
 			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
@@ -8993,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
-	stride_mult = intel_fb_stride_alignment(fb, 0);
+	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -9049,6 +9025,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = NULL;
 
@@ -9356,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 				      struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_atomic_state *state =
 		to_intel_atomic_state(crtc_state->base.state);
 
-	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+	    IS_ICELAKE(dev_priv)) {
 		struct intel_encoder *encoder =
 			intel_get_crtc_new_encoder(state, crtc_state);
 
@@ -9397,30 +9376,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
 	u32 temp;
 
 	/* TODO: TBT pll not implemented. */
-	switch (port) {
-	case PORT_A:
-	case PORT_B:
+	if (intel_port_is_combophy(dev_priv, port)) {
 		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
 		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
 
-		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+		if (WARN_ON(!intel_dpll_is_combophy(id)))
 			return;
-		break;
-	case PORT_C:
-		id = DPLL_ID_ICL_MGPLL1;
-		break;
-	case PORT_D:
-		id = DPLL_ID_ICL_MGPLL2;
-		break;
-	case PORT_E:
-		id = DPLL_ID_ICL_MGPLL3;
-		break;
-	case PORT_F:
-		id = DPLL_ID_ICL_MGPLL4;
-		break;
-	default:
-		MISSING_CASE(port);
+	} else if (intel_port_is_tc(dev_priv, port)) {
+		id = icl_port_to_mg_pll_id(port);
+	} else {
+		WARN(1, "Invalid port %x\n", port);
 		return;
 	}
 
@@ -9510,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
+	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+	unsigned long enabled_panel_transcoders = 0;
+	enum transcoder panel_transcoder;
 	u32 tmp;
 
+	if (IS_ICELAKE(dev_priv))
+		panel_transcoder_mask |=
+			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
 	/*
 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
-	 * transcoder handled below.
+	 * and DSI transcoders handled below.
 	 */
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 
@@ -9522,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
 	 * consistency and less surprising code; it's in always on power).
 	 */
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
-	if (tmp & TRANS_DDI_FUNC_ENABLE) {
-		enum pipe trans_edp_pipe;
+	for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+		enum pipe trans_pipe;
+
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+			continue;
+
+		/*
+		 * Log all enabled ones, but use only the first one.
+		 *
+		 * FIXME: This won't work for two separate DSI displays.
+		 */
+		enabled_panel_transcoders |= BIT(panel_transcoder);
+		if (enabled_panel_transcoders != BIT(panel_transcoder))
+			continue;
+
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 		default:
-			WARN(1, "unknown pipe linked to edp transcoder\n");
+			WARN(1, "unknown pipe linked to transcoder %s\n",
+			     transcoder_name(panel_transcoder));
 			/* fall through */
 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
 		case TRANS_DDI_EDP_INPUT_A_ON:
-			trans_edp_pipe = PIPE_A;
+			trans_pipe = PIPE_A;
 			break;
 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
-			trans_edp_pipe = PIPE_B;
+			trans_pipe = PIPE_B;
 			break;
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
-			trans_edp_pipe = PIPE_C;
+			trans_pipe = PIPE_C;
 			break;
 		}
 
-		if (trans_edp_pipe == crtc->pipe)
-			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+		if (trans_pipe == crtc->pipe)
+			pipe_config->cpu_transcoder = panel_transcoder;
 	}
 
+	/*
+	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+	 */
+	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+		enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
@@ -9677,33 +9670,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	if (!active)
 		goto out;
 
-	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+	    IS_ICELAKE(dev_priv)) {
 		haswell_get_ddi_port_state(crtc, pipe_config);
 		intel_get_pipe_timings(crtc, pipe_config);
 	}
 
 	intel_get_pipe_src_size(crtc, pipe_config);
+	intel_get_crtc_ycbcr_config(crtc, pipe_config);
 
 	pipe_config->gamma_mode =
 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
-	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
-		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
-		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
-		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-			bool blend_mode_420 = tmp &
-					      PIPEMISC_YUV420_MODE_FULL_BLEND;
-
-			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
-			if (pipe_config->ycbcr420 != clrspace_yuv ||
-			    pipe_config->ycbcr420 != blend_mode_420)
-				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
-		} else if (clrspace_yuv) {
-			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
-		}
-	}
-
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
 		power_domain_mask |= BIT_ULL(power_domain);
@@ -9749,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 base;
 
-	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
 		base = obj->phys_handle->busaddr;
 	else
 		base = intel_plane_ggtt_offset(plane_state);
@@ -9972,15 +9950,13 @@ static void i845_update_cursor(struct intel_plane *plane,
 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
 	}
 
-	POSTING_READ_FW(CURCNTR(PIPE_A));
-
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void i845_disable_cursor(struct intel_plane *plane,
-				struct intel_crtc *crtc)
+				const struct intel_crtc_state *crtc_state)
 {
-	i845_update_cursor(plane, NULL, NULL);
+	i845_update_cursor(plane, crtc_state, NULL);
 }
 
 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10171,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * On some platforms writing CURCNTR first will also
 	 * cause CURPOS to be armed by the CURBASE write.
 	 * Without the CURCNTR write the CURPOS write would
-	 * arm itself. Thus we always start the full update
-	 * with a CURCNTR write.
+	 * arm itself. Thus we always update CURCNTR before
+	 * CURPOS.
 	 *
 	 * On other platforms CURPOS always requires the
 	 * CURBASE write to arm the update. Additionally
@@ -10182,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * cursor that doesn't appear to move, or even change
 	 * shape. Thus we always write CURBASE.
 	 *
-	 * CURCNTR and CUR_FBC_CTL are always
-	 * armed by the CURBASE write only.
+	 * The other registers are armed by the CURBASE write
+	 * except when the plane is getting enabled, at which time
+	 * the CURCNTR write arms the update.
 	 */
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_write_cursor_wm(plane, crtc_state);
+
 	if (plane->cursor.base != base ||
 	    plane->cursor.size != fbc_ctl ||
 	    plane->cursor.cntl != cntl) {
-		I915_WRITE_FW(CURCNTR(pipe), cntl);
 		if (HAS_CUR_FBC(dev_priv))
 			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+		I915_WRITE_FW(CURCNTR(pipe), cntl);
 		I915_WRITE_FW(CURPOS(pipe), pos);
 		I915_WRITE_FW(CURBASE(pipe), base);
 
@@ -10202,15 +10183,13 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 		I915_WRITE_FW(CURBASE(pipe), base);
 	}
 
-	POSTING_READ_FW(CURBASE(pipe));
-
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void i9xx_disable_cursor(struct intel_plane *plane,
-				struct intel_crtc *crtc)
+				const struct intel_crtc_state *crtc_state)
 {
-	i9xx_update_cursor(plane, NULL, NULL);
+	i9xx_update_cursor(plane, crtc_state, NULL);
 }
 
 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10808,14 +10787,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 		pipe_config->fb_bits |= plane->frontbuffer_bit;
 
 	/*
-	 * WaCxSRDisabledForSpriteScaling:ivb
+	 * ILK/SNB DVSACNTR/Sprite Enable
+	 * IVB SPR_CTL/Sprite Enable
+	 * "When in Self Refresh Big FIFO mode, a write to enable the
+	 *  plane will be internally buffered and delayed while Big FIFO
+	 *  mode is exiting."
 	 *
-	 * cstate->update_wm was already set above, so this flag will
-	 * take effect when we commit and program watermarks.
+	 * Which means that enabling the sprite can take an extra frame
+	 * when we start in big FIFO mode (LP1+). Thus we need to drop
+	 * down to LP0 and wait for vblank in order to make sure the
+	 * sprite gets enabled on the next vblank after the register write.
+	 * Doing otherwise would risk enabling the sprite one frame after
+	 * we've already signalled flip completion. We can resume LP1+
+	 * once the sprite has been enabled.
+	 *
+	 *
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 * IVB SPR_SCALE/Scaling Enable
+	 * "Low Power watermarks must be disabled for at least one
+	 *  frame before enabling sprite scaling, and kept disabled
+	 *  until sprite scaling is disabled."
+	 *
+	 * ILK/SNB DVSASCALE/Scaling Enable
+	 * "When in Self Refresh Big FIFO mode, scaling enable will be
+	 *  masked off while Big FIFO mode is exiting."
+	 *
+	 * Despite the w/a only being listed for IVB we assume that
+	 * the ILK/SNB note has similar ramifications, hence we apply
+	 * the w/a on all three platforms.
 	 */
-	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
-	    needs_scaling(to_intel_plane_state(plane_state)) &&
-	    !needs_scaling(old_plane_state))
+	if (plane->id == PLANE_SPRITE0 &&
+	    (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+	     IS_IVYBRIDGE(dev_priv)) &&
+	    (turn_on || (!needs_scaling(old_plane_state) &&
+			 needs_scaling(to_intel_plane_state(plane_state)))))
 		pipe_config->disable_lp_wm = true;
 
 	return 0;
@@ -10851,6 +10856,101 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
 	return true;
 }
 
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state, *linked_plane_state;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		linked = plane_state->linked_plane;
+
+		if (!linked)
+			continue;
+
+		linked_plane_state = intel_atomic_get_plane_state(state, linked);
+		if (IS_ERR(linked_plane_state))
+			return PTR_ERR(linked_plane_state);
+
+		WARN_ON(linked_plane_state->linked_plane != plane);
+		WARN_ON(linked_plane_state->slave == plane_state->slave);
+	}
+
+	return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
+
+	/*
+	 * Destroy all old plane links and make the slave plane invisible
+	 * in the crtc_state->active_planes mask.
+	 */
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+			continue;
+
+		plane_state->linked_plane = NULL;
+		if (plane_state->slave && !plane_state->base.visible) {
+			crtc_state->active_planes &= ~BIT(plane->id);
+			crtc_state->update_planes |= BIT(plane->id);
+		}
+
+		plane_state->slave = false;
+	}
+
+	if (!crtc_state->nv12_planes)
+		return 0;
+
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct intel_plane_state *linked_state = NULL;
+
+		if (plane->pipe != crtc->pipe ||
+		    !(crtc_state->nv12_planes & BIT(plane->id)))
+			continue;
+
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+			if (!icl_is_nv12_y_plane(linked->id))
+				continue;
+
+			if (crtc_state->active_planes & BIT(linked->id))
+				continue;
+
+			linked_state = intel_atomic_get_plane_state(state, linked);
+			if (IS_ERR(linked_state))
+				return PTR_ERR(linked_state);
+
+			break;
+		}
+
+		if (!linked_state) {
+			DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+				      hweight8(crtc_state->nv12_planes));
+
+			return -EINVAL;
+		}
+
+		plane_state->linked_plane = linked;
+
+		linked_state->slave = true;
+		linked_state->linked_plane = plane;
+		crtc_state->active_planes |= BIT(linked->id);
+		crtc_state->update_planes |= BIT(linked->id);
+		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+	}
+
+	return 0;
+}
+
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 				   struct drm_crtc_state *crtc_state)
 {
@@ -10859,7 +10959,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc_state);
-	struct drm_atomic_state *state = crtc_state->state;
 	int ret;
 	bool mode_changed = needs_modeset(crtc_state);
 
@@ -10896,8 +10995,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 		}
 	}
 
-	if (dev_priv->display.compute_intermediate_wm &&
-	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
+	if (dev_priv->display.compute_intermediate_wm) {
 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
 			return 0;
 
@@ -10913,9 +11011,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
 			return ret;
 		}
-	} else if (dev_priv->display.compute_intermediate_wm) {
-		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9) {
@@ -10923,6 +11018,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 			ret = skl_update_scaler_crtc(pipe_config);
 
 		if (!ret)
+			ret = icl_check_nv12_planes(pipe_config);
+		if (!ret)
 			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
 							    pipe_config);
 		if (!ret)
@@ -10937,8 +11034,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.atomic_begin = intel_begin_crtc_commit,
-	.atomic_flush = intel_finish_crtc_commit,
 	.atomic_check = intel_crtc_atomic_check,
 };
 
@@ -10967,30 +11062,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 	drm_connector_list_iter_end(&conn_iter);
 }
 
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
-			   struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+		      struct intel_crtc_state *pipe_config)
 {
-	const struct drm_display_info *info = &connector->base.display_info;
-	int bpp = pipe_config->pipe_bpp;
+	struct drm_connector *connector = conn_state->connector;
+	const struct drm_display_info *info = &connector->display_info;
+	int bpp;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
-		      connector->base.base.id,
-		      connector->base.name);
-
-	/* Don't use an invalid EDID bpc value */
-	if (info->bpc != 0 && info->bpc * 3 < bpp) {
-		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
-			      bpp, info->bpc * 3);
-		pipe_config->pipe_bpp = info->bpc * 3;
+	switch (conn_state->max_bpc) {
+	case 6 ... 7:
+		bpp = 6 * 3;
+		break;
+	case 8 ... 9:
+		bpp = 8 * 3;
+		break;
+	case 10 ... 11:
+		bpp = 10 * 3;
+		break;
+	case 12:
+		bpp = 12 * 3;
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	/* Clamp bpp to 8 on screens without EDID 1.4 */
-	if (info->bpc == 0 && bpp > 24) {
-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-			      bpp);
-		pipe_config->pipe_bpp = 24;
+	if (bpp < pipe_config->pipe_bpp) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+			      connector->base.id, connector->name,
+			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+			      pipe_config->pipe_bpp);
+
+		pipe_config->pipe_bpp = bpp;
 	}
+
+	return 0;
 }
 
 static int
@@ -10998,7 +11105,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 			  struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct drm_atomic_state *state;
+	struct drm_atomic_state *state = pipe_config->base.state;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	int bpp, i;
@@ -11011,21 +11118,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 	else
 		bpp = 8*3;
 
-
 	pipe_config->pipe_bpp = bpp;
 
-	state = pipe_config->base.state;
-
-	/* Clamp display bpp to EDID value */
+	/* Clamp display bpp to connector max bpp */
 	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		int ret;
+
 		if (connector_state->crtc != &crtc->base)
 			continue;
 
-		connected_sink_compute_bpp(to_intel_connector(connector),
-					   pipe_config);
+		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+		if (ret)
+			return ret;
 	}
 
-	return bpp;
+	return 0;
 }
 
 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11095,6 +11202,20 @@ static void snprintf_output_types(char *buf, size_t len,
 	WARN_ON_ONCE(output_types != 0);
 }
 
+static const char * const output_format_str[] = {
+	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+	if (format >= ARRAY_SIZE(output_format_str))
+		format = INTEL_OUTPUT_FORMAT_INVALID;
+	return output_format_str[format];
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config,
 				   const char *context)
@@ -11114,6 +11235,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
 		      buf, pipe_config->output_types);
 
+	DRM_DEBUG_KMS("output format: %s\n",
+		      output_formats(pipe_config->output_format));
+
 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
 		      transcoder_name(pipe_config->cpu_transcoder),
 		      pipe_config->pipe_bpp, pipe_config->dither);
@@ -11123,9 +11247,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				      pipe_config->fdi_lanes,
 				      &pipe_config->fdi_m_n);
 
-	if (pipe_config->ycbcr420)
-		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
 	if (intel_crtc_has_dp_encoder(pipe_config)) {
 		intel_dump_m_n_config(pipe_config, "dp m_n",
 				pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11314,7 +11435,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
-	int base_bpp, ret = -EINVAL;
+	int base_bpp, ret;
 	int i;
 	bool retry = true;
 
@@ -11336,10 +11457,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
-	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-					     pipe_config);
-	if (base_bpp < 0)
-		goto fail;
+	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+					pipe_config);
+	if (ret)
+		return ret;
+
+	base_bpp = pipe_config->pipe_bpp;
 
 	/*
 	 * Determine the real pipe dimensions. Note that stereo modes can
@@ -11361,7 +11484,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
 			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-			goto fail;
+			return -EINVAL;
 		}
 
 		/*
@@ -11397,7 +11520,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
 		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
 			DRM_DEBUG_KMS("Encoder config failure\n");
-			goto fail;
+			return -EINVAL;
 		}
 	}
 
@@ -11408,16 +11531,16 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 			* pipe_config->pixel_multiplier;
 
 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+	if (ret == -EDEADLK)
+		return ret;
 	if (ret < 0) {
 		DRM_DEBUG_KMS("CRTC fixup failed\n");
-		goto fail;
+		return ret;
 	}
 
 	if (ret == RETRY) {
-		if (WARN(!retry, "loop in pipe configuration computation\n")) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		if (WARN(!retry, "loop in pipe configuration computation\n"))
+			return -EINVAL;
 
 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
 		retry = false;
@@ -11433,8 +11556,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-fail:
-	return ret;
+	return 0;
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11703,6 +11825,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
 
 	PIPE_CONF_CHECK_I(pixel_multiplier);
+	PIPE_CONF_CHECK_I(output_format);
 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11711,7 +11834,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
-	PIPE_CONF_CHECK_BOOL(ycbcr420);
 
 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
 
@@ -11833,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct skl_pipe_wm hw_wm, *sw_wm;
 	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+	struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+	struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11843,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
 	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
 
+	skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
@@ -11885,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
-		sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+		hw_ddb_entry = &hw_ddb_y[plane];
+		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11935,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		}
 
 		/* DDB */
-		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+		hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12220,8 +12346,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
 	verify_disabled_dpll_state(dev);
 }
 
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/*
@@ -12252,7 +12379,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 	 * answer that's slightly in the future.
 	 */
 	if (IS_GEN2(dev_priv)) {
-		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 		int vtotal;
 
 		vtotal = adjusted_mode->crtc_vtotal;
@@ -12261,7 +12388,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
 		crtc->scanline_offset = vtotal - 1;
 	} else if (HAS_DDI(dev_priv) &&
-		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		crtc->scanline_offset = 2;
 	} else
 		crtc->scanline_offset = 1;
@@ -12544,6 +12671,8 @@ static int intel_atomic_check(struct drm_device *dev,
 		}
 
 		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret == -EDEADLK)
+			return ret;
 		if (ret) {
 			intel_dump_pipe_config(to_intel_crtc(crtc),
 					       pipe_config, "[failed]");
@@ -12575,6 +12704,10 @@ static int intel_atomic_check(struct drm_device *dev,
 		intel_state->cdclk.logical = dev_priv->cdclk.logical;
 	}
 
+	ret = icl_add_linked_planes(intel_state);
+	if (ret)
+		return ret;
+
 	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
 		return ret;
@@ -12614,7 +12747,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 						 to_intel_plane(crtc->primary));
 
 	if (modeset) {
-		update_scanline_offset(intel_crtc);
+		update_scanline_offset(pipe_config);
 		dev_priv->display.crtc_enable(pipe_config, state);
 
 		/* vblanks work again, re-enable pipe CRC. */
@@ -12627,7 +12760,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	if (new_plane_state)
 		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
 
-	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+	intel_begin_crtc_commit(crtc, old_crtc_state);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+	else
+		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+
+	intel_finish_crtc_commit(crtc, old_crtc_state);
 }
 
 static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12659,13 +12799,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 	int i;
 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
 	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
-	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
 		/* ignore allocations for crtcs that have been turned off. */
 		if (new_crtc_state->active)
-			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
 	/* If 2nd DBuf slice required, enable it here */
 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12691,14 +12830,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 			if (updated & cmask || !cstate->base.active)
 				continue;
 
-			if (skl_ddb_allocation_overlaps(dev_priv,
+			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
 							entries,
-							&cstate->wm.skl.ddb,
-							i))
+							INTEL_INFO(dev_priv)->num_pipes, i))
 				continue;
 
 			updated |= cmask;
-			entries[i] = &cstate->wm.skl.ddb;
+			entries[i] = cstate->wm.skl.ddb;
 
 			/*
 			 * If this is an already active pipe, its DDB changed,
@@ -12788,8 +12926,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
 	struct drm_crtc *crtc;
-	struct intel_crtc_state *intel_cstate;
+	struct intel_crtc *intel_crtc;
 	u64 put_domains[I915_MAX_PIPES] = {};
 	int i;
 
@@ -12801,24 +12940,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+		intel_crtc = to_intel_crtc(crtc);
 
 		if (needs_modeset(new_crtc_state) ||
 		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
 
-			put_domains[to_intel_crtc(crtc)->pipe] =
+			put_domains[intel_crtc->pipe] =
 				modeset_get_crtc_power_domains(crtc,
-					to_intel_crtc_state(new_crtc_state));
+					new_intel_crtc_state);
 		}
 
 		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-				       to_intel_crtc_state(new_crtc_state));
+		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
 
 		if (old_crtc_state->active) {
-			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+			intel_crtc_disable_planes(intel_state, intel_crtc);
 
 			/*
 			 * We need to disable pipe CRC before disabling the pipe,
@@ -12826,10 +12966,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			 */
 			intel_crtc_disable_pipe_crc(intel_crtc);
 
-			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
-			intel_disable_shared_dpll(intel_crtc);
+			intel_disable_shared_dpll(old_intel_crtc_state);
 
 			/*
 			 * Underruns don't always raise
@@ -12843,7 +12983,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			    !HAS_GMCH_DISPLAY(dev_priv) &&
 			    dev_priv->display.initial_watermarks)
 				dev_priv->display.initial_watermarks(intel_state,
-								     to_intel_crtc_state(new_crtc_state));
+								     new_intel_crtc_state);
 		}
 	}
 
@@ -12902,11 +13042,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * TODO: Move this (and other cleanup) to an async worker eventually.
 	 */
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		intel_cstate = to_intel_crtc_state(new_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
 
 		if (dev_priv->display.optimize_watermarks)
 			dev_priv->display.optimize_watermarks(intel_state,
-							      intel_cstate);
+							      new_intel_crtc_state);
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13173,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 	struct i915_vma *vma;
 
 	if (plane->id == PLANE_CURSOR &&
-	    INTEL_INFO(dev_priv)->cursor_needs_physical) {
+	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 		const int align = intel_cursor_alignment(dev_priv);
 		int err;
@@ -13289,13 +13429,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	fb_obj_bump_render_priority(obj);
-
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
 	if (ret)
 		return ret;
 
+	fb_obj_bump_render_priority(obj);
 	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
 
 	if (!new_state->fence) { /* implicit fencing */
@@ -13426,7 +13565,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(old_intel_cstate, intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
-		skl_detach_scalers(intel_crtc);
+		skl_detach_scalers(intel_cstate);
 
 out:
 	if (dev_priv->display.atomic_update_watermarks)
@@ -13528,56 +13667,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
 	}
 }
 
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
-					   u32 format, u64 modifier)
-{
-	struct intel_plane *plane = to_intel_plane(_plane);
-
-	switch (modifier) {
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-	case I915_FORMAT_MOD_Y_TILED:
-	case I915_FORMAT_MOD_Yf_TILED:
-		break;
-	case I915_FORMAT_MOD_Y_TILED_CCS:
-	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		if (!plane->has_ccs)
-			return false;
-		break;
-	default:
-		return false;
-	}
-
-	switch (format) {
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-		if (is_ccs_modifier(modifier))
-			return true;
-		/* fall through */
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_NV12:
-		if (modifier == I915_FORMAT_MOD_Yf_TILED)
-			return true;
-		/* fall through */
-	case DRM_FORMAT_C8:
-		if (modifier == DRM_FORMAT_MOD_LINEAR ||
-		    modifier == I915_FORMAT_MOD_X_TILED ||
-		    modifier == I915_FORMAT_MOD_Y_TILED)
-			return true;
-		/* fall through */
-	default:
-		return false;
-	}
-}
-
 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 					      u32 format, u64 modifier)
 {
@@ -13585,18 +13674,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 		format == DRM_FORMAT_ARGB8888;
 }
 
-static struct drm_plane_funcs skl_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_get_property = intel_plane_atomic_get_property,
-	.atomic_set_property = intel_plane_atomic_set_property,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = intel_plane_destroy,
@@ -13607,7 +13685,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
 	.format_mod_supported = i965_plane_format_mod_supported,
 };
 
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = intel_plane_destroy,
@@ -13633,14 +13711,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *old_fb;
-	struct drm_crtc_state *crtc_state = crtc->state;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->state);
+	struct intel_crtc_state *new_crtc_state;
 
 	/*
 	 * When crtc is inactive or there is a modeset pending,
 	 * wait for it to complete in the slowpath
 	 */
-	if (!crtc_state->active || needs_modeset(crtc_state) ||
-	    to_intel_crtc_state(crtc_state)->update_pipe)
+	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+	    crtc_state->update_pipe)
 		goto slow;
 
 	old_plane_state = plane->state;
@@ -13670,6 +13750,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	if (!new_plane_state)
 		return -ENOMEM;
 
+	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+	if (!new_crtc_state) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
 	drm_atomic_set_fb_for_plane(new_plane_state, fb);
 
 	new_plane_state->src_x = src_x;
@@ -13681,9 +13767,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	new_plane_state->crtc_w = crtc_w;
 	new_plane_state->crtc_h = crtc_h;
 
-	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
-						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
-						  to_intel_plane_state(plane->state),
+	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+						  to_intel_plane_state(old_plane_state),
 						  to_intel_plane_state(new_plane_state));
 	if (ret)
 		goto out_free;
@@ -13705,14 +13790,25 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	/* Swap plane state */
 	plane->state = new_plane_state;
 
+	/*
+	 * We cannot swap crtc_state as it may be in use by an atomic commit or
+	 * page flip that's running simultaneously. If we swap crtc_state and
+	 * destroy the old state, we will cause a use-after-free there.
+	 *
+	 * Only update active_planes, which is needed for our internal
+	 * bookkeeping. Either value will do the right thing when updating
+	 * planes atomically. If the cursor was part of the atomic update then
+	 * we would have taken the slowpath.
+	 */
+	crtc_state->active_planes = new_crtc_state->active_planes;
+
 	if (plane->state->visible) {
 		trace_intel_update_plane(plane, to_intel_crtc(crtc));
-		intel_plane->update_plane(intel_plane,
-					  to_intel_crtc_state(crtc->state),
+		intel_plane->update_plane(intel_plane, crtc_state,
 					  to_intel_plane_state(plane->state));
 	} else {
 		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
-		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+		intel_plane->disable_plane(intel_plane, crtc_state);
 	}
 
 	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -13720,6 +13816,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 out_free:
+	if (new_crtc_state)
+		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
 	if (ret)
 		intel_plane_destroy_state(plane, new_plane_state);
 	else
@@ -13760,176 +13858,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
 		return i9xx_plane == PLANE_A;
 }
 
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
-			      enum pipe pipe, enum plane_id plane_id)
-{
-	if (!HAS_FBC(dev_priv))
-		return false;
-
-	return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-			  enum pipe pipe, enum plane_id plane_id)
-{
-	/*
-	 * FIXME: ICL requires two hardware planes for scanning out NV12
-	 * framebuffers. Do not advertize support until this is implemented.
-	 */
-	if (INTEL_GEN(dev_priv) >= 11)
-		return false;
-
-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-		return false;
-
-	if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
-		return false;
-
-	if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
-		return false;
-
-	return true;
-}
-
 static struct intel_plane *
 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	struct intel_plane *primary = NULL;
-	struct intel_plane_state *state = NULL;
+	struct intel_plane *plane;
 	const struct drm_plane_funcs *plane_funcs;
-	const uint32_t *intel_primary_formats;
 	unsigned int supported_rotations;
-	unsigned int num_formats;
-	const uint64_t *modifiers;
+	unsigned int possible_crtcs;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
 	int ret;
 
-	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (!primary) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	state = intel_create_plane_state(&primary->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	primary->base.state = &state->base;
-
 	if (INTEL_GEN(dev_priv) >= 9)
-		state->scaler_id = -1;
-	primary->pipe = pipe;
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_PRIMARY);
+
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	plane->pipe = pipe;
 	/*
 	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
 	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
 	 */
 	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
-		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
+		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
 	else
-		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
-	primary->id = PLANE_PRIMARY;
-	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+	plane->id = PLANE_PRIMARY;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		primary->has_fbc = skl_plane_has_fbc(dev_priv,
-						     primary->pipe,
-						     primary->id);
-	else
-		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
-						      primary->i9xx_plane);
-
-	if (primary->has_fbc) {
+	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+	if (plane->has_fbc) {
 		struct intel_fbc *fbc = &dev_priv->fbc;
 
-		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-						     PLANE_PRIMARY);
-
-		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
-			intel_primary_formats = skl_pri_planar_formats;
-			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
-		} else {
-			intel_primary_formats = skl_primary_formats;
-			num_formats = ARRAY_SIZE(skl_primary_formats);
-		}
-
-		if (primary->has_ccs)
-			modifiers = skl_format_modifiers_ccs;
-		else
-			modifiers = skl_format_modifiers_noccs;
-
-		primary->max_stride = skl_plane_max_stride;
-		primary->update_plane = skl_update_plane;
-		primary->disable_plane = skl_disable_plane;
-		primary->get_hw_state = skl_plane_get_hw_state;
-		primary->check_plane = skl_plane_check;
-
-		plane_funcs = &skl_plane_funcs;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		intel_primary_formats = i965_primary_formats;
+	if (INTEL_GEN(dev_priv) >= 4) {
+		formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
 		modifiers = i9xx_format_modifiers;
 
-		primary->max_stride = i9xx_plane_max_stride;
-		primary->update_plane = i9xx_update_plane;
-		primary->disable_plane = i9xx_disable_plane;
-		primary->get_hw_state = i9xx_plane_get_hw_state;
-		primary->check_plane = i9xx_plane_check;
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = i9xx_update_plane;
+		plane->disable_plane = i9xx_disable_plane;
+		plane->get_hw_state = i9xx_plane_get_hw_state;
+		plane->check_plane = i9xx_plane_check;
 
 		plane_funcs = &i965_plane_funcs;
 	} else {
-		intel_primary_formats = i8xx_primary_formats;
+		formats = i8xx_primary_formats;
 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
 		modifiers = i9xx_format_modifiers;
 
-		primary->max_stride = i9xx_plane_max_stride;
-		primary->update_plane = i9xx_update_plane;
-		primary->disable_plane = i9xx_disable_plane;
-		primary->get_hw_state = i9xx_plane_get_hw_state;
-		primary->check_plane = i9xx_plane_check;
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = i9xx_update_plane;
+		plane->disable_plane = i9xx_disable_plane;
+		plane->get_hw_state = i9xx_plane_get_hw_state;
+		plane->check_plane = i9xx_plane_check;
 
 		plane_funcs = &i8xx_plane_funcs;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
-					       DRM_PLANE_TYPE_PRIMARY,
-					       "plane 1%c", pipe_name(pipe));
-	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
+	possible_crtcs = BIT(pipe);
+
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       possible_crtcs, plane_funcs,
+					       formats, num_formats, modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "primary %c", pipe_name(pipe));
 	else
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       possible_crtcs, plane_funcs,
+					       formats, num_formats, modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "plane %c",
-					       plane_name(primary->i9xx_plane));
+					       plane_name(plane->i9xx_plane));
 	if (ret)
 		goto fail;
 
-	if (INTEL_GEN(dev_priv) >= 10) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
-			DRM_MODE_REFLECT_X;
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 		supported_rotations =
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 			DRM_MODE_REFLECT_X;
@@ -13941,26 +13953,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	}
 
 	if (INTEL_GEN(dev_priv) >= 4)
-		drm_plane_create_rotation_property(&primary->base,
+		drm_plane_create_rotation_property(&plane->base,
 						   DRM_MODE_ROTATE_0,
 						   supported_rotations);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		drm_plane_create_color_properties(&primary->base,
-						  BIT(DRM_COLOR_YCBCR_BT601) |
-						  BIT(DRM_COLOR_YCBCR_BT709),
-						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
-						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
-						  DRM_COLOR_YCBCR_BT709,
-						  DRM_COLOR_YCBCR_LIMITED_RANGE);
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
 
-	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
-
-	return primary;
+	return plane;
 
 fail:
-	kfree(state);
-	kfree(primary);
+	intel_plane_free(plane);
 
 	return ERR_PTR(ret);
 }
@@ -13969,23 +13971,13 @@ static struct intel_plane *
 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 			  enum pipe pipe)
 {
-	struct intel_plane *cursor = NULL;
-	struct intel_plane_state *state = NULL;
+	unsigned int possible_crtcs;
+	struct intel_plane *cursor;
 	int ret;
 
-	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-	if (!cursor) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	state = intel_create_plane_state(&cursor->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	cursor->base.state = &state->base;
+	cursor = intel_plane_alloc();
+	if (IS_ERR(cursor))
+		return cursor;
 
 	cursor->pipe = pipe;
 	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -14012,8 +14004,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
 		cursor->cursor.size = ~0;
 
+	possible_crtcs = BIT(pipe);
+
 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-				       0, &intel_cursor_plane_funcs,
+				       possible_crtcs, &intel_cursor_plane_funcs,
 				       intel_cursor_formats,
 				       ARRAY_SIZE(intel_cursor_formats),
 				       cursor_format_modifiers,
@@ -14028,16 +14022,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 						   DRM_MODE_ROTATE_0 |
 						   DRM_MODE_ROTATE_180);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		state->scaler_id = -1;
-
 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
 	return cursor;
 
 fail:
-	kfree(state);
-	kfree(cursor);
+	intel_plane_free(cursor);
 
 	return ERR_PTR(ret);
 }
@@ -14058,7 +14048,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 		struct intel_scaler *scaler = &scaler_state->scalers[i];
 
 		scaler->in_use = 0;
-		scaler->mode = PS_SCALER_MODE_DYN;
+		scaler->mode = 0;
 	}
 
 	scaler_state->scaler_id = -1;
@@ -14153,18 +14143,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 	return ret;
 }
 
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-{
-	struct drm_device *dev = connector->base.dev;
-
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
-	if (!connector->base.state->crtc)
-		return INVALID_PIPE;
-
-	return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file)
 {
@@ -14281,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
 	intel_pps_init(dev_priv);
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return;
 
 	/*
@@ -14301,6 +14279,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		intel_ddi_init(dev_priv, PORT_D);
 		intel_ddi_init(dev_priv, PORT_E);
 		intel_ddi_init(dev_priv, PORT_F);
+		icl_dsi_init(dev_priv);
 	} else if (IS_GEN9_LP(dev_priv)) {
 		/*
 		 * FIXME: Broxton doesn't support port detection via the
@@ -14523,7 +14502,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 
 static
 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
-			 uint64_t fb_modifier, uint32_t pixel_format)
+			 u32 pixel_format, u64 fb_modifier)
 {
 	struct intel_crtc *crtc;
 	struct intel_plane *plane;
@@ -14545,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_framebuffer *fb = &intel_fb->base;
-	struct drm_format_name_buf format_name;
 	u32 pitch_limit;
 	unsigned int tiling, stride;
 	int ret = -EINVAL;
@@ -14576,33 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		}
 	}
 
-	/* Passed in modifier sanity checking. */
-	switch (mode_cmd->modifier[0]) {
-	case I915_FORMAT_MOD_Y_TILED_CCS:
-	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		switch (mode_cmd->pixel_format) {
-		case DRM_FORMAT_XBGR8888:
-		case DRM_FORMAT_ABGR8888:
-		case DRM_FORMAT_XRGB8888:
-		case DRM_FORMAT_ARGB8888:
-			break;
-		default:
-			DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
-			goto err;
-		}
-		/* fall through */
-	case I915_FORMAT_MOD_Y_TILED:
-	case I915_FORMAT_MOD_Yf_TILED:
-		if (INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
-				      mode_cmd->modifier[0]);
-			goto err;
-		}
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-		break;
-	default:
-		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+	if (!drm_any_plane_has_format(&dev_priv->drm,
+				      mode_cmd->pixel_format,
+				      mode_cmd->modifier[0])) {
+		struct drm_format_name_buf format_name;
+
+		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+			      drm_get_format_name(mode_cmd->pixel_format,
+						  &format_name),
 			      mode_cmd->modifier[0]);
 		goto err;
 	}
@@ -14617,8 +14576,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		goto err;
 	}
 
-	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
-					   mode_cmd->pixel_format);
+	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+					   mode_cmd->modifier[0]);
 	if (mode_cmd->pitches[0] > pitch_limit) {
 		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
 			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14637,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		goto err;
 	}
 
-	/* Reject formats not supported by any plane early. */
-	switch (mode_cmd->pixel_format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_ARGB8888:
-		break;
-	case DRM_FORMAT_XRGB1555:
-		if (INTEL_GEN(dev_priv) > 3) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_ABGR8888:
-		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-		    INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-		if (INTEL_GEN(dev_priv) < 4) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_ABGR2101010:
-		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_VYUY:
-		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-			goto err;
-		}
-		break;
-	case DRM_FORMAT_NV12:
-		if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
-		    IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
-			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-				      drm_get_format_name(mode_cmd->pixel_format,
-							  &format_name));
-			goto err;
-		}
-		break;
-	default:
-		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
-			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
-		goto err;
-	}
-
 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
 	if (mode_cmd->offsets[0] != 0)
 		goto err;
@@ -14971,174 +14867,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.update_crtcs = intel_update_crtcs;
 }
 
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
-	DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
-	DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
-	DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
- * which is 300 ms greater than eDP spec T12 min.
- */
-static void quirk_increase_t12_delay(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
-	DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time
- * this allows the onboard retimer to correctly sync to signal
- */
-static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
-	DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
-	int device;
-	int subsystem_vendor;
-	int subsystem_device;
-	void (*hook)(struct drm_device *dev);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
-	void (*hook)(struct drm_device *dev);
-	const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
-	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
-	return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
-	{
-		.dmi_id_list = &(const struct dmi_system_id[]) {
-			{
-				.callback = intel_dmi_reverse_brightness,
-				.ident = "NCR Corporation",
-				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
-					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
-				},
-			},
-			{ }  /* terminating entry */
-		},
-		.hook = quirk_invert_brightness,
-	},
-};
-
-static struct intel_quirk intel_quirks[] = {
-	/* Lenovo U160 cannot use SSC on LVDS */
-	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
-	/* Sony Vaio Y cannot use SSC on LVDS */
-	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
-	/* Acer Aspire 5734Z must invert backlight brightness */
-	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-	/* Acer/eMachines G725 */
-	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-	/* Acer/eMachines e725 */
-	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-	/* Acer/Packard Bell NCL20 */
-	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-	/* Acer Aspire 4736Z */
-	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
-	/* Acer Aspire 5336 */
-	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
-	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
-	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
-	/* Acer C720 Chromebook (Core i3 4005U) */
-	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
-	/* Apple Macbook 2,1 (Core 2 T7400) */
-	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
-	/* Apple Macbook 4,1 */
-	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
-	/* Toshiba CB35 Chromebook (Celeron 2955U) */
-	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
-	/* HP Chromebook 14 (Celeron 2955U) */
-	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
-	/* Dell Chromebook 11 */
-	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
-	/* Dell Chromebook 11 (2015 version) */
-	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
-	/* Toshiba Satellite P50-C-18C */
-	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
-	/* GeminiLake NUC */
-	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-	/* ASRock ITX*/
-	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-static void intel_init_quirks(struct drm_device *dev)
-{
-	struct pci_dev *d = dev->pdev;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
-		struct intel_quirk *q = &intel_quirks[i];
-
-		if (d->device == q->device &&
-		    (d->subsystem_vendor == q->subsystem_vendor ||
-		     q->subsystem_vendor == PCI_ANY_ID) &&
-		    (d->subsystem_device == q->subsystem_device ||
-		     q->subsystem_device == PCI_ANY_ID))
-			q->hook(dev);
-	}
-	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
-		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
-			intel_dmi_quirks[i].hook(dev);
-	}
-}
-
 /* Disable the VGA plane that we never use */
 static void i915_disable_vga(struct drm_i915_private *dev_priv)
 {
@@ -15352,7 +15080,9 @@ int intel_modeset_init(struct drm_device *dev)
 	INIT_WORK(&dev_priv->atomic_helper.free_work,
 		  intel_atomic_helper_free_state_worker);
 
-	intel_init_quirks(dev);
+	intel_init_quirks(dev_priv);
+
+	intel_fbc_init(dev_priv);
 
 	intel_init_pm(dev_priv);
 
@@ -15584,8 +15314,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
 		if (pipe == crtc->pipe)
 			continue;
 
-		DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
-			      plane->base.name);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+			      plane->base.base.id, plane->base.name);
 
 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 		intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15626,7 +15356,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
 	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15636,7 +15367,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 	}
 
-	if (crtc->active) {
+	if (crtc_state->base.active) {
 		struct intel_plane *plane;
 
 		/* Disable everything but the primary plane */
@@ -15652,10 +15383,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	if (crtc->active && !intel_crtc_has_encoders(crtc))
+	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
 		intel_crtc_disable_noatomic(&crtc->base, ctx);
 
-	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+	if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
 		/*
 		 * We start out with underrun reporting disabled to avoid races.
 		 * For correct bookkeeping mark this on active crtcs.
@@ -15686,6 +15417,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_connector *connector;
 
 	/* We need to check both for a crtc link (meaning that the
@@ -15709,7 +15441,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
 				      encoder->base.base.id,
 				      encoder->base.name);
-			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+			if (encoder->disable)
+				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 			if (encoder->post_disable)
 				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 		}
@@ -15726,6 +15459,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 	/* notify opregion of the sanitized encoder state */
 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_sanitize_encoder_pll_mapping(encoder);
 }
 
 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15774,6 +15510,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
 		crtc_state = to_intel_crtc_state(crtc->base.state);
 
 		intel_set_plane_visible(crtc_state, plane_state, visible);
+
+		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+			      plane->base.base.id, plane->base.name,
+			      enableddisabled(visible), pipe_name(pipe));
 	}
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15926,7 +15666,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
 			drm_calc_timestamping_constants(&crtc->base,
 							&crtc_state->base.adjusted_mode);
-			update_scanline_offset(crtc);
+			update_scanline_offset(crtc_state);
 		}
 
 		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15981,6 +15721,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
 	}
 }
 
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+				       enum port port, i915_reg_t hdmi_reg)
+{
+	u32 val = I915_READ(hdmi_reg);
+
+	if (val & SDVO_ENABLE ||
+	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+		return;
+
+	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+		      port_name(port));
+
+	val &= ~SDVO_PIPE_SEL_MASK;
+	val |= SDVO_PIPE_SEL(PIPE_A);
+
+	I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+				     enum port port, i915_reg_t dp_reg)
+{
+	u32 val = I915_READ(dp_reg);
+
+	if (val & DP_PORT_EN ||
+	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+		return;
+
+	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+		      port_name(port));
+
+	val &= ~DP_PIPE_SEL_MASK;
+	val |= DP_PIPE_SEL(PIPE_A);
+
+	I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * The BIOS may select transcoder B on some of the PCH
+	 * ports even it doesn't enable the port. This would trip
+	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+	 * Sanitize the transcoder select bits to prevent that. We
+	 * assume that the BIOS never actually enabled the port,
+	 * because if it did we'd actually have to toggle the port
+	 * on and back off to make the transcoder A select stick
+	 * (see intel_dp_link_down(), intel_disable_hdmi(),
+	 * intel_disable_sdvo()).
+	 */
+	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+	/* PCH SDVOB is multiplexed with HDMIB */
+	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
 /* Scan out the current hw modeset state,
  * and sanitize it to the current state
  */
@@ -15990,6 +15789,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc;
+	struct intel_crtc_state *crtc_state;
 	struct intel_encoder *encoder;
 	int i;
 
@@ -16001,6 +15801,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	/* HW state is read out, now we need to sanitize this mess. */
 	get_encoder_power_domains(dev_priv);
 
+	if (HAS_PCH_IBX(dev_priv))
+		ibx_sanitize_pch_ports(dev_priv);
+
 	/*
 	 * intel_sanitize_plane_mapping() may need to do vblank
 	 * waits, so we need vblank interrupts restored beforehand.
@@ -16008,7 +15811,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		drm_crtc_vblank_reset(&crtc->base);
 
-		if (crtc->active)
+		if (crtc->base.state->active)
 			drm_crtc_vblank_on(&crtc->base);
 	}
 
@@ -16018,8 +15821,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		intel_sanitize_encoder(encoder);
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		crtc_state = to_intel_crtc_state(crtc->base.state);
 		intel_sanitize_crtc(crtc, ctx);
-		intel_dump_pipe_config(crtc, crtc->config,
+		intel_dump_pipe_config(crtc, crtc_state,
 				       "[setup_hw_state]");
 	}
 
@@ -16053,7 +15857,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	for_each_intel_crtc(dev, crtc) {
 		u64 put_domains;
 
-		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
+		crtc_state = to_intel_crtc_state(crtc->base.state);
+		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
 		if (WARN_ON(put_domains))
 			modeset_put_power_domains(dev_priv, put_domains);
 	}
@@ -16097,29 +15902,6 @@ void intel_display_resume(struct drm_device *dev)
 		drm_atomic_state_put(state);
 }
 
-int intel_connector_register(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	int ret;
-
-	ret = intel_backlight_device_register(intel_connector);
-	if (ret)
-		goto err;
-
-	return 0;
-
-err:
-	return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	intel_backlight_device_unregister(intel_connector);
-	intel_panel_destroy_backlight(connector);
-}
-
 static void intel_hpd_poll_fini(struct drm_device *dev)
 {
 	struct intel_connector *connector;
@@ -16130,9 +15912,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
 	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (connector->modeset_retry_work.func)
 			cancel_work_sync(&connector->modeset_retry_work);
-		if (connector->hdcp_shim) {
-			cancel_delayed_work_sync(&connector->hdcp_check_work);
-			cancel_work_sync(&connector->hdcp_prop_work);
+		if (connector->hdcp.shim) {
+			cancel_delayed_work_sync(&connector->hdcp.check_work);
+			cancel_work_sync(&connector->hdcp.prop_work);
 		}
 	}
 	drm_connector_list_iter_end(&conn_iter);
@@ -16172,18 +15954,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	drm_mode_config_cleanup(dev);
 
-	intel_cleanup_overlay(dev_priv);
+	intel_overlay_cleanup(dev_priv);
 
 	intel_teardown_gmbus(dev_priv);
 
 	destroy_workqueue(dev_priv->modeset_wq);
-}
 
-void intel_connector_attach_encoder(struct intel_connector *connector,
-				    struct intel_encoder *encoder)
-{
-	connector->encoder = encoder;
-	drm_connector_attach_encoder(&connector->base, &encoder->base);
+	intel_fbc_cleanup_cfb(dev_priv);
 }
 
 /*
@@ -16273,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 	};
 	int i;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return NULL;
 
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e..4262452 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -43,6 +43,11 @@ enum i915_gpio {
 	GPIOM,
 };
 
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
 enum pipe {
 	INVALID_PIPE = -1,
 
@@ -57,12 +62,25 @@ enum pipe {
 #define pipe_name(p) ((p) + 'A')
 
 enum transcoder {
-	TRANSCODER_A = 0,
-	TRANSCODER_B,
-	TRANSCODER_C,
+	/*
+	 * The following transcoders have a 1:1 transcoder -> pipe mapping,
+	 * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+	 * rest have consecutive values and match the enum values of the pipes
+	 * they map to.
+	 */
+	TRANSCODER_A = PIPE_A,
+	TRANSCODER_B = PIPE_B,
+	TRANSCODER_C = PIPE_C,
+
+	/*
+	 * The following transcoders can map to any pipe, their enum value
+	 * doesn't need to stay fixed.
+	 */
 	TRANSCODER_EDP,
-	TRANSCODER_DSI_A,
-	TRANSCODER_DSI_C,
+	TRANSCODER_DSI_0,
+	TRANSCODER_DSI_1,
+	TRANSCODER_DSI_A = TRANSCODER_DSI_0,	/* legacy DSI */
+	TRANSCODER_DSI_C = TRANSCODER_DSI_1,	/* legacy DSI */
 
 	I915_MAX_TRANSCODERS
 };
@@ -120,6 +138,9 @@ enum plane_id {
 	PLANE_SPRITE0,
 	PLANE_SPRITE1,
 	PLANE_SPRITE2,
+	PLANE_SPRITE3,
+	PLANE_SPRITE4,
+	PLANE_SPRITE5,
 	PLANE_CURSOR,
 
 	I915_MAX_PLANES,
@@ -221,6 +242,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_TRANSCODER_EDP_VDSC,
 	POWER_DOMAIN_TRANSCODER_DSI_A,
 	POWER_DOMAIN_TRANSCODER_DSI_C,
 	POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -363,7 +385,7 @@ struct intel_link_m_n {
 		(__dev_priv)->power_domains.power_well_count;		\
 	     (__power_well)++)
 
-#define for_each_power_well_rev(__dev_priv, __power_well)			\
+#define for_each_power_well_reverse(__dev_priv, __power_well)			\
 	for ((__power_well) = (__dev_priv)->power_domains.power_wells +		\
 			      (__dev_priv)->power_domains.power_well_count - 1;	\
 	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;	\
@@ -373,10 +395,18 @@ struct intel_link_m_n {
 	for_each_power_well(__dev_priv, __power_well)				\
 		for_each_if((__power_well)->desc->domains & (__domain_mask))
 
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
-	for_each_power_well_rev(__dev_priv, __power_well)		        \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+	for_each_power_well_reverse(__dev_priv, __power_well)		        \
 		for_each_if((__power_well)->desc->domains & (__domain_mask))
 
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+		     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+		      (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+	     (__i)++) \
+		for_each_if(plane)
+
 #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
 	for ((__i) = 0; \
 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -402,10 +432,18 @@ struct intel_link_m_n {
 	     (__i)++) \
 		for_each_if(plane)
 
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_crtc && \
+		     ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+		      (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+		      (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+	     (__i)++) \
+		for_each_if(crtc)
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n,
 			    bool constant_n);
-
 bool is_ccs_modifier(u64 modifier);
 #endif
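
For reference, the old-state iterator introduced above follows the same pattern as the existing new-state iterator. Below is a minimal usage sketch; the function name and debug message are hypothetical, while the macro, the intel_plane/intel_plane_state types and the helpers used come from this series:

    static void dump_old_plane_states(struct intel_atomic_state *state)
    {
    	struct intel_plane *plane;
    	struct intel_plane_state *old_plane_state;
    	int i;

    	/* Visit each plane in the commit along with its pre-commit state. */
    	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i)
    		DRM_DEBUG_KMS("[PLANE:%d:%s] was %s\n",
    			      plane->base.base.id, plane->base.name,
    			      enableddisabled(old_plane_state->base.visible));
    }

The trailing for_each_if(plane) in the macro makes the loop body run only for slots that actually carry a plane.
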
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56..fdd2cbc 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,19 @@
 
 #define DP_DPRX_ESI_LEN 14
 
+/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
+#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
+#define DP_DSC_MIN_SUPPORTED_BPC		8
+#define DP_DSC_MAX_SUPPORTED_BPC		10
+
+/* DP DSC throughput values used for slice count calculations, in KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE			2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000
+
+/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR		976
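+/*
+ * Note: 61440 above is the small joiner RAM size in bits (2 FIFOs x 640
+ * entries x 6 bytes x 8), and 976 is the 0.976 FEC efficiency factor
+ * expressed in units of 1/1000.
+ */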
+
 /* Compliance test status bits  */
 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -93,6 +106,14 @@ static const struct dp_link_dpll chv_dpll[] = {
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
 };
 
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/* With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
 /**
  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -222,138 +243,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
 	return DIV_ROUND_UP(pixel_clock * bpp, 8);
 }
 
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum port port = intel_dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	u32 ln0, ln1, lane_info;
-
-	if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
-		return;
-
-	ln0 = I915_READ(MG_DP_MODE(port, 0));
-	ln1 = I915_READ(MG_DP_MODE(port, 1));
-
-	switch (intel_dig_port->tc_type) {
-	case TC_PORT_TYPEC:
-		ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-		ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-
-		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
-			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
-			    DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
-		switch (lane_info) {
-		case 0x1:
-		case 0x4:
-			break;
-		case 0x2:
-			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
-			break;
-		case 0x3:
-			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
-			       MG_DP_MODE_CFG_DP_X2_MODE;
-			break;
-		case 0x8:
-			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
-			break;
-		case 0xC:
-			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
-			       MG_DP_MODE_CFG_DP_X2_MODE;
-			break;
-		case 0xF:
-			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
-			       MG_DP_MODE_CFG_DP_X2_MODE;
-			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
-			       MG_DP_MODE_CFG_DP_X2_MODE;
-			break;
-		default:
-			MISSING_CASE(lane_info);
-		}
-		break;
-
-	case TC_PORT_LEGACY:
-		ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
-		ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
-		break;
-
-	default:
-		MISSING_CASE(intel_dig_port->tc_type);
-		return;
-	}
-
-	I915_WRITE(MG_DP_MODE(port, 0), ln0);
-	I915_WRITE(MG_DP_MODE(port, 1), ln1);
-}
-
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum port port = dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
-	u32 val;
-	int i;
-
-	if (tc_port == PORT_TC_NONE)
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
-		val = I915_READ(mg_regs[i]);
-		val |= MG_DP_MODE_CFG_TR2PWR_GATING |
-		       MG_DP_MODE_CFG_TRPWR_GATING |
-		       MG_DP_MODE_CFG_CLNPWR_GATING |
-		       MG_DP_MODE_CFG_DIGPWR_GATING |
-		       MG_DP_MODE_CFG_GAONPWR_GATING;
-		I915_WRITE(mg_regs[i], val);
-	}
-
-	val = I915_READ(MG_MISC_SUS0(tc_port));
-	val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
-	       MG_MISC_SUS0_CFG_TR2PWR_GATING |
-	       MG_MISC_SUS0_CFG_CL2PWR_GATING |
-	       MG_MISC_SUS0_CFG_GAONPWR_GATING |
-	       MG_MISC_SUS0_CFG_TRPWR_GATING |
-	       MG_MISC_SUS0_CFG_CL1PWR_GATING |
-	       MG_MISC_SUS0_CFG_DGPWR_GATING;
-	I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum port port = dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
-	u32 val;
-	int i;
-
-	if (tc_port == PORT_TC_NONE)
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
-		val = I915_READ(mg_regs[i]);
-		val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
-			 MG_DP_MODE_CFG_TRPWR_GATING |
-			 MG_DP_MODE_CFG_CLNPWR_GATING |
-			 MG_DP_MODE_CFG_DIGPWR_GATING |
-			 MG_DP_MODE_CFG_GAONPWR_GATING);
-		I915_WRITE(mg_regs[i], val);
-	}
-
-	val = I915_READ(MG_MISC_SUS0(tc_port));
-	val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
-		 MG_MISC_SUS0_CFG_TR2PWR_GATING |
-		 MG_MISC_SUS0_CFG_CL2PWR_GATING |
-		 MG_MISC_SUS0_CFG_GAONPWR_GATING |
-		 MG_MISC_SUS0_CFG_TRPWR_GATING |
-		 MG_MISC_SUS0_CFG_CL1PWR_GATING |
-		 MG_MISC_SUS0_CFG_DGPWR_GATING);
-	I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
 int
 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 {
@@ -455,7 +344,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
 	if (INTEL_GEN(dev_priv) >= 10) {
 		source_rates = cnl_rates;
 		size = ARRAY_SIZE(cnl_rates);
-		if (INTEL_GEN(dev_priv) == 10)
+		if (IS_GEN10(dev_priv))
 			max_rate = cnl_max_source_rate(intel_dp);
 		else
 			max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +505,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	int target_clock = mode->clock;
 	int max_rate, mode_rate, max_lanes, max_link_clock;
 	int max_dotclk;
+	u16 dsc_max_output_bpp = 0;
+	u8 dsc_slice_count = 0;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
@@ -641,7 +533,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 	mode_rate = intel_dp_link_required(target_clock, 18);
 
-	if (mode_rate > max_rate || target_clock > max_dotclk)
+	/*
+	 * Output bpp is stored in 6.4 format, so right shift by 4 to get the
+	 * integer value, since we support only integer values of bpp.
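+	 * For example, a raw 6.4 value of 0x150 (336) encodes 21.0 bpp, and
+	 * 0x150 >> 4 == 21.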
+	 */
+	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+		if (intel_dp_is_edp(intel_dp)) {
+			dsc_max_output_bpp =
+				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+			dsc_slice_count =
+				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+								true);
+		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+			dsc_max_output_bpp =
+				intel_dp_dsc_get_output_bpp(max_link_clock,
+							    max_lanes,
+							    target_clock,
+							    mode->hdisplay) >> 4;
+			dsc_slice_count =
+				intel_dp_dsc_get_slice_count(intel_dp,
+							     target_clock,
+							     mode->hdisplay);
+		}
+	}
+
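+	/*
+	 * A mode that exceeds the plain link bandwidth is still allowed if
+	 * the sink advertised DSC support, since the stream can then be
+	 * compressed to fit.
+	 */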
+	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
+	    target_clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
 
 	if (mode->clock < 10000)
@@ -690,7 +608,8 @@ static void pps_lock(struct intel_dp *intel_dp)
 	 * See intel_power_sequencer_reset() why we need
 	 * a power domain reference here.
 	 */
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv,
+				intel_aux_power_domain(dp_to_dig_port(intel_dp)));
 
 	mutex_lock(&dev_priv->pps_mutex);
 }
@@ -701,7 +620,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
 
 	mutex_unlock(&dev_priv->pps_mutex);
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(dp_to_dig_port(intel_dp)));
 }
 
 static void
@@ -1156,6 +1076,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
 	if (index)
 		return 0;
@@ -1165,7 +1086,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
 	 * divide by 2000 and use that
 	 */
-	if (intel_dp->aux_ch == AUX_CH_A)
+	if (dig_port->aux_ch == AUX_CH_A)
 		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
 	else
 		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -1174,8 +1095,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
-	if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
 		case 0: return 63;
@@ -1503,80 +1425,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return ret;
 }
 
-static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum port port = encoder->port;
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[port];
-	enum aux_ch aux_ch;
-
-	if (!info->alternate_aux_channel) {
-		aux_ch = (enum aux_ch) port;
-
-		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
-			      aux_ch_name(aux_ch), port_name(port));
-		return aux_ch;
-	}
-
-	switch (info->alternate_aux_channel) {
-	case DP_AUX_A:
-		aux_ch = AUX_CH_A;
-		break;
-	case DP_AUX_B:
-		aux_ch = AUX_CH_B;
-		break;
-	case DP_AUX_C:
-		aux_ch = AUX_CH_C;
-		break;
-	case DP_AUX_D:
-		aux_ch = AUX_CH_D;
-		break;
-	case DP_AUX_E:
-		aux_ch = AUX_CH_E;
-		break;
-	case DP_AUX_F:
-		aux_ch = AUX_CH_F;
-		break;
-	default:
-		MISSING_CASE(info->alternate_aux_channel);
-		aux_ch = AUX_CH_A;
-		break;
-	}
-
-	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
-		      aux_ch_name(aux_ch), port_name(port));
-
-	return aux_ch;
-}
-
-static enum intel_display_power_domain
-intel_aux_power_domain(struct intel_dp *intel_dp)
-{
-	switch (intel_dp->aux_ch) {
-	case AUX_CH_A:
-		return POWER_DOMAIN_AUX_A;
-	case AUX_CH_B:
-		return POWER_DOMAIN_AUX_B;
-	case AUX_CH_C:
-		return POWER_DOMAIN_AUX_C;
-	case AUX_CH_D:
-		return POWER_DOMAIN_AUX_D;
-	case AUX_CH_E:
-		return POWER_DOMAIN_AUX_E;
-	case AUX_CH_F:
-		return POWER_DOMAIN_AUX_F;
-	default:
-		MISSING_CASE(intel_dp->aux_ch);
-		return POWER_DOMAIN_AUX_A;
-	}
-}
 
 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_B:
@@ -1592,7 +1446,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_B:
@@ -1608,7 +1463,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_A:
@@ -1626,7 +1482,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_A:
@@ -1644,7 +1501,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_A:
@@ -1663,7 +1521,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum aux_ch aux_ch = intel_dp->aux_ch;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	enum aux_ch aux_ch = dig_port->aux_ch;
 
 	switch (aux_ch) {
 	case AUX_CH_A:
@@ -1689,10 +1548,8 @@ static void
 intel_dp_aux_init(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-
-	intel_dp->aux_ch = intel_aux_ch(intel_dp);
-	intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
 		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1853,6 +1710,41 @@ struct link_config_limits {
 	int min_bpp, max_bpp;
 };
 
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+					 const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return INTEL_GEN(dev_priv) >= 11 &&
+		pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *pipe_config)
+{
+	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+		drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+					 const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return INTEL_GEN(dev_priv) >= 10 &&
+		pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+				  const struct intel_crtc_state *pipe_config)
+{
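+	/* DSC over a (non-eDP) DP link requires FEC to be enabled. */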
+	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+		return false;
+
+	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 				struct intel_crtc_state *pipe_config)
 {
@@ -1951,14 +1843,158 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 	return false;
 }
 
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static bool
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+				  struct intel_crtc_state *pipe_config,
+				  const struct link_config_limits *limits)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int bpp, clock, lane_count;
+	int mode_rate, link_clock, link_avail;
+
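+	/* Step output bpp down in units of 2 bits per channel x 3 channels. */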
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
+
+		for (lane_count = limits->min_lane_count;
+		     lane_count <= limits->max_lane_count;
+		     lane_count <<= 1) {
+			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+				link_clock = intel_dp->common_rates[clock];
+				link_avail = intel_dp_max_data_rate(link_clock,
+								    lane_count);
+
+				if (mode_rate <= link_avail) {
+					pipe_config->lane_count = lane_count;
+					pipe_config->pipe_bpp = bpp;
+					pipe_config->port_clock = link_clock;
+
+					return true;
+				}
+			}
+		}
+	}
+
+	return false;
+}
+
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+	int i, num_bpc;
+	u8 dsc_bpc[3] = {0};
+
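+	/* dsc_bpc[] is reported highest bpc first, so the loop below picks
+	 * the largest supported bpc that does not exceed dsc_max_bpc. */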
+	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+						       dsc_bpc);
+	for (i = 0; i < num_bpc; i++) {
+		if (dsc_max_bpc >= dsc_bpc[i])
+			return dsc_bpc[i] * 3;
+	}
+
+	return 0;
+}
+
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+					struct intel_crtc_state *pipe_config,
+					struct drm_connector_state *conn_state,
+					struct link_config_limits *limits)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	u8 dsc_max_bpc;
+	int pipe_bpp;
+
+	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+		return false;
+
+	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+			    conn_state->max_requested_bpc);
+
+	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+		return false;
+	}
+
+	/*
+	 * For now enable DSC for max bpp, max link rate, max lane count.
+	 * Optimize this later for the minimum possible link rate/lane count
+	 * with DSC enabled for the requested mode.
+	 */
+	pipe_config->pipe_bpp = pipe_bpp;
+	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+	pipe_config->lane_count = limits->max_lane_count;
+
+	if (intel_dp_is_edp(intel_dp)) {
+		pipe_config->dsc_params.compressed_bpp =
+			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+			      pipe_config->pipe_bpp);
+		pipe_config->dsc_params.slice_count =
+			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+							true);
+	} else {
+		u16 dsc_max_output_bpp;
+		u8 dsc_dp_slice_count;
+
+		dsc_max_output_bpp =
+			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+						    pipe_config->lane_count,
+						    adjusted_mode->crtc_clock,
+						    adjusted_mode->crtc_hdisplay);
+		dsc_dp_slice_count =
+			intel_dp_dsc_get_slice_count(intel_dp,
+						     adjusted_mode->crtc_clock,
+						     adjusted_mode->crtc_hdisplay);
+		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+			return false;
+		}
+		pipe_config->dsc_params.compressed_bpp = min_t(u16,
+							       dsc_max_output_bpp >> 4,
+							       pipe_config->pipe_bpp);
+		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+	}
+	/*
+	 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
+	 * rate is greater than the maximum cdclk and the slice count allows
+	 * splitting (more than one slice), we need to use 2 VDSC instances.
+	 */
+	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+		if (pipe_config->dsc_params.slice_count > 1) {
+			pipe_config->dsc_params.dsc_split = true;
+		} else {
+			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+			return false;
+		}
+	}
+	if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+			      "Compressed BPP = %d\n",
+			      pipe_config->pipe_bpp,
+			      pipe_config->dsc_params.compressed_bpp);
+		return false;
+	}
+	pipe_config->dsc_params.compression_enable = true;
+	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+		      "Compressed Bpp = %d Slice Count = %d\n",
+		      pipe_config->pipe_bpp,
+		      pipe_config->dsc_params.compressed_bpp,
+		      pipe_config->dsc_params.slice_count);
+
+	return true;
+}
+
 static bool
 intel_dp_compute_link_config(struct intel_encoder *encoder,
-			     struct intel_crtc_state *pipe_config)
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct link_config_limits limits;
 	int common_len;
+	bool ret;
 
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
 						    intel_dp->max_link_rate);
@@ -1975,13 +2011,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	limits.min_bpp = 6 * 3;
 	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-	if (intel_dp_is_edp(intel_dp)) {
+	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
-		 * advertizes being capable of. The panels are generally
-		 * designed to support only a single clock and lane
-		 * configuration, and typically these values correspond to the
-		 * native resolution of the panel.
+		 * advertises being capable of. The eDP 1.3 and earlier panels
+		 * are generally designed to support only a single clock and
+		 * lane configuration, and typically these values correspond to
+		 * the native resolution of the panel. With eDP 1.4 rate select
+		 * and DSC, this is decreasingly the case, and we need to be
+		 * able to select less than maximum link config.
 		 */
 		limits.min_lane_count = limits.max_lane_count;
 		limits.min_clock = limits.max_clock;
@@ -1995,23 +2033,52 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		      intel_dp->common_rates[limits.max_clock],
 		      limits.max_bpp, adjusted_mode->crtc_clock);
 
-	/*
-	 * Optimize for slow and wide. This is the place to add alternative
-	 * optimization policy.
-	 */
-	if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
-		return false;
+	if (intel_dp_is_edp(intel_dp))
+		/*
+		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+		 * section A.1: "It is recommended that the minimum number of
+		 * lanes be used, using the minimum link rate allowed for that
+		 * lane configuration."
+		 *
+		 * Note that we use the max clock and lane count for eDP 1.3 and
+		 * earlier, and fast vs. wide is irrelevant.
+		 */
+		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+							&limits);
+	else
+		/* Optimize for slow and wide. */
+		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+							&limits);
 
-	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
-		      pipe_config->lane_count, pipe_config->port_clock,
-		      pipe_config->pipe_bpp);
+	/* enable compression if the mode doesn't fit available BW */
+	if (!ret) {
+		if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+						 conn_state, &limits))
+			return false;
+	}
 
-	DRM_DEBUG_KMS("DP link rate required %i available %i\n",
-		      intel_dp_link_required(adjusted_mode->crtc_clock,
-					     pipe_config->pipe_bpp),
-		      intel_dp_max_data_rate(pipe_config->port_clock,
-					     pipe_config->lane_count));
+	if (pipe_config->dsc_params.compression_enable) {
+		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+			      pipe_config->lane_count, pipe_config->port_clock,
+			      pipe_config->pipe_bpp,
+			      pipe_config->dsc_params.compressed_bpp);
 
+		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+			      intel_dp_link_required(adjusted_mode->crtc_clock,
+						     pipe_config->dsc_params.compressed_bpp),
+			      intel_dp_max_data_rate(pipe_config->port_clock,
+						     pipe_config->lane_count));
+	} else {
+		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+			      pipe_config->lane_count, pipe_config->port_clock,
+			      pipe_config->pipe_bpp);
+
+		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+			      intel_dp_link_required(adjusted_mode->crtc_clock,
+						     pipe_config->pipe_bpp),
+			      intel_dp_max_data_rate(pipe_config->port_clock,
+						     pipe_config->lane_count));
+	}
 	return true;
 }
 
@@ -2023,6 +2090,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
 	enum port port = encoder->port;
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +2102,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+	if (lspcon->active)
+		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
 	pipe_config->has_drrs = false;
 	if (IS_G4X(dev_priv) || port == PORT_A)
 		pipe_config->has_audio = false;
@@ -2072,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return false;
 
-	if (!intel_dp_compute_link_config(encoder, pipe_config))
+	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+				  intel_dp_supports_fec(intel_dp, pipe_config);
+
+	if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
 		return false;
 
 	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2090,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
 	}
 
-	intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
-			       adjusted_mode->crtc_clock,
-			       pipe_config->port_clock,
-			       &pipe_config->dp_m_n,
-			       constant_n);
+	if (!pipe_config->dsc_params.compression_enable)
+		intel_link_compute_m_n(pipe_config->pipe_bpp,
+				       pipe_config->lane_count,
+				       adjusted_mode->crtc_clock,
+				       pipe_config->port_clock,
+				       &pipe_config->dp_m_n,
+				       constant_n);
+	else
+		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+				       pipe_config->lane_count,
+				       adjusted_mode->crtc_clock,
+				       pipe_config->port_clock,
+				       &pipe_config->dp_m_n,
+				       constant_n);
 
 	if (intel_connector->panel.downclock_mode != NULL &&
 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2338,7 +2422,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
 		      port_name(intel_dig_port->base.port));
@@ -2424,7 +2509,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	if ((pp & PANEL_POWER_ON) == 0)
 		intel_dp->panel_power_off_time = ktime_get_boottime();
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2623,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
 static void edp_panel_off(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
 
@@ -2546,10 +2633,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 		return;
 
 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
-		      port_name(dp_to_dig_port(intel_dp)->base.port));
+		      port_name(dig_port->base.port));
 
 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
-	     port_name(dp_to_dig_port(intel_dp)->base.port));
+	     port_name(dig_port->base.port));
 
 	pp = ironlake_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2655,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	intel_dp->panel_power_off_time = ktime_get_boottime();
 
 	/* We got a reference when we enabled the VDD. */
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
 }
 
 void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2788,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
 		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
 }
 
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state,
+					   bool enable)
+{
+	int ret;
+
+	if (!crtc_state->dsc_params.compression_enable)
+		return;
+
+	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+				 enable ? DP_DECOMPRESSION_EN : 0);
+	if (ret < 0)
+		DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+			      enable ? "enable" : "disable");
+}
+
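The write above follows the drm_dp_dpcd_writeb() convention of returning 1
on success and a negative errno on failure. The expected call sites are in
the DDI enable/disable paths, which are outside this hunk; a hypothetical
placement, for illustration only:

	/* after link training, before enabling the transcoder */
	intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);

	/* and on the teardown side */
	intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, false);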
 /* If the sink supports it, try to set the power state appropriately */
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
@@ -3900,6 +4003,40 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
 	return intel_dp->dpcd[DP_DPCD_REV] != 0;
 }
 
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+	/*
+	 * Clear the cached register set to avoid using stale values
+	 * for the sinks that do not support DSC.
+	 */
+	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+	/* Clear fec_capable to avoid using stale values */
+	intel_dp->fec_capable = 0;
+
+	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+				     intel_dp->dsc_dpcd,
+				     sizeof(intel_dp->dsc_dpcd)) < 0)
+			DRM_ERROR("Failed to read DPCD register 0x%x\n",
+				  DP_DSC_SUPPORT);
+
+		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+			      (int)sizeof(intel_dp->dsc_dpcd),
+			      intel_dp->dsc_dpcd);
+
+		/* FEC is supported only on DP 1.4 */
+		if (!intel_dp_is_edp(intel_dp) &&
+		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+				      &intel_dp->fec_capable) < 0)
+			DRM_ERROR("Failed to read FEC DPCD register\n");
+
+		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+	}
+}
+
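With the capability registers cached here, later checks can avoid further
AUX traffic. A minimal sketch of consuming the cache, assuming the drm_dp
DSC helpers this series relies on elsewhere (drm_dp_sink_supports_dsc(),
drm_dp_dsc_sink_max_slice_count()); the wrapper name is made up:

	/* sketch: is the cached DSC capability actually usable? */
	static bool sink_dsc_usable(struct intel_dp *intel_dp)
	{
		return drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd) &&
		       drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						       false /* SST */) > 0;
	}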
 static bool
 intel_edp_init_dpcd(struct intel_dp *intel_dp)
 {
@@ -3976,6 +4113,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
 	intel_dp_set_common_rates(intel_dp);
 
+	/* Read the eDP DSC DPCD registers */
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		intel_dp_get_dsc_sink_cap(intel_dp);
+
 	return true;
 }
 
@@ -3983,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
-	u8 sink_count;
-
 	if (!intel_dp_read_dpcd(intel_dp))
 		return false;
 
@@ -3994,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		intel_dp_set_common_rates(intel_dp);
 	}
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
-		return false;
-
 	/*
-	 * Sink count can change between short pulse hpd hence
-	 * a member variable in intel_dp will track any changes
-	 * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for the sink count, which
+	 * is why we don't bother reading it here or in intel_edp_init_dpcd().
 	 */
-	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+	if (!intel_dp_is_edp(intel_dp)) {
+		u8 count;
+		ssize_t r;
 
-	/*
-	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
-	 * a dongle is present but no display. Unless we require to know
-	 * if a dongle is present or not, we don't need to update
-	 * downstream port information. So, an early return here saves
-	 * time from performing other operations which are not required.
-	 */
-	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
-		return false;
+		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+		if (r < 1)
+			return false;
+
+		/*
+		 * Sink count can change between short pulse hpd hence
+		 * a member variable in intel_dp will track any changes
+		 * between short pulse interrupts.
+		 */
+		intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+		/*
+		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+		 * a dongle is present but no display. Unless we require to know
+		 * if a dongle is present or not, we don't need to update
+		 * downstream port information. So, an early return here saves
+		 * time from performing other operations which are not required.
+		 */
+		if (!intel_dp->sink_count)
+			return false;
+	}
 
 	if (!drm_dp_is_branch(intel_dp->dpcd))
 		return true; /* native DP sink */
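For reference, the helper macro used above merges the SINK_COUNT field that
DP 1.4 split across the register (bit 7 became the count's bit 6); per the
DRM DP helpers of this era it expands to roughly:

	/* as defined in include/drm/drm_dp_helper.h */
	#define DP_GET_SINK_COUNT(x)	((((x) & 0x80) >> 1) | ((x) & 0x3f))

This is also why a branch device with nothing attached legitimately reports
a count of 0 and is treated as disconnected by the early return above.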
@@ -4029,16 +4178,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+intel_dp_sink_can_mst(struct intel_dp *intel_dp)
 {
 	u8 mstm_cap;
 
-	if (!i915_modparams.enable_dp_mst)
-		return false;
-
-	if (!intel_dp->can_mst)
-		return false;
-
 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
 		return false;
 
@@ -4048,34 +4191,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
 	return mstm_cap & DP_MST_CAP;
 }
 
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+	return i915_modparams.enable_dp_mst &&
+		intel_dp->can_mst &&
+		intel_dp_sink_can_mst(intel_dp);
+}
+
 static void
 intel_dp_configure_mst(struct intel_dp *intel_dp)
 {
-	if (!i915_modparams.enable_dp_mst)
-		return;
+	struct intel_encoder *encoder =
+		&dp_to_dig_port(intel_dp)->base;
+	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+		      port_name(encoder->port), yesno(intel_dp->can_mst),
+		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
 
 	if (!intel_dp->can_mst)
 		return;
 
-	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
-	if (intel_dp->is_mst)
-		DRM_DEBUG_KMS("Sink is MST capable\n");
-	else
-		DRM_DEBUG_KMS("Sink is not MST capable\n");
+	intel_dp->is_mst = sink_can_mst &&
+		i915_modparams.enable_dp_mst;
 
 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 					intel_dp->is_mst);
 }
 
 static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
-	return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
-				 sink_irq_vector) == 1;
-}
-
-static bool
 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
 	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4228,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 		DP_DPRX_ESI_LEN;
 }
 
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+				int mode_clock, int mode_hdisplay)
+{
+	u16 bits_per_pixel, max_bpp_small_joiner_ram;
+	int i;
+
+	/*
+	 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
+	 * (LinkSymbolClock) * 8 * ((100 - FECOverhead) / 100) * (TimeSlotsPerMTP)
+	 * FECOverhead = 2.4%; for SST, TimeSlotsPerMTP is 1,
+	 * for MST, TimeSlotsPerMTP has to be calculated.
+	 */
+	bits_per_pixel = (link_clock * lane_count * 8 *
+			  DP_DSC_FEC_OVERHEAD_FACTOR) /
+		mode_clock;
+
+	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+		mode_hdisplay;
+
+	/*
+	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+	 * check, output bpp from small joiner RAM check)
+	 */
+	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+	/* Error out if the max bpp is less than smallest allowed valid bpp */
+	if (bits_per_pixel < valid_dsc_bpp[0]) {
+		DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+		return 0;
+	}
+
+	/* Find the nearest match in the array of known BPPs from VESA */
+	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+		if (bits_per_pixel < valid_dsc_bpp[i + 1])
+			break;
+	}
+	bits_per_pixel = valid_dsc_bpp[i];
+
+	/*
+	 * Compressed BPP is returned in U6.4 format, so multiply by 16; for
+	 * Gen 11 the fractional part is 0.
+	 */
+	return bits_per_pixel << 4;
+}
+
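A worked example for the function above, assuming the constants this series
defines outside this hunk (small-joiner RAM of 61440 bits, valid VESA bpps
{6, 8, 10, 12, 15}): for 3840x2160@60 (~533250 kHz) on 4 lanes of HBR2
(540000 kHz), the link-bandwidth bound is roughly
540000 * 4 * 8 * 0.976 / 533250 ~= 31 bpp, while the small-joiner bound is
61440 / 3840 = 16 bpp. The smaller value, 16, is then snapped down to the
nearest valid bpp, 15, and returned as 15 << 4 = 240 in U6.4 format.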
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+				int mode_clock,
+				int mode_hdisplay)
+{
+	u8 min_slice_count, i;
+	int max_slice_width;
+
+	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+		min_slice_count = DIV_ROUND_UP(mode_clock,
+					       DP_DSC_MAX_ENC_THROUGHPUT_0);
+	else
+		min_slice_count = DIV_ROUND_UP(mode_clock,
+					       DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+			      max_slice_width);
+		return 0;
+	}
+	/* Also take into account max slice width */
+	min_slice_count = min_t(uint8_t, min_slice_count,
+				DIV_ROUND_UP(mode_hdisplay,
+					     max_slice_width));
+
+	/* Find the closest match to the valid slice count values */
+	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+		if (valid_dsc_slicecount[i] >
+		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+						    false))
+			break;
+		if (min_slice_count <= valid_dsc_slicecount[i])
+			return valid_dsc_slicecount[i];
+	}
+
+	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+	return 0;
+}
+
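Continuing the same 4k60 example for the slice-count helper (hedged:
DP_DSC_MAX_ENC_THROUGHPUT_0 = 340000 KPixels/s and valid slice counts
{1, 2, 4} are assumed from definitions outside this hunk): 533250 is below
the peak pixel rate, so min_slice_count = DIV_ROUND_UP(533250, 340000) = 2;
with a sink max slice width of, say, 2560, DIV_ROUND_UP(3840, 2560) = 2 as
well, and the function returns 2 provided the sink supports at least two
slices per line.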
 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 {
 	int status = 0;
@@ -4341,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
 	if (!intel_dp->link_trained)
 		return false;
 
+	/*
+	 * While the PSR source HW is enabled it controls the main link on its
+	 * own, enabling and disabling it to send frames. A retrain attempted
+	 * in that state would fail, since the link may be off, or training
+	 * patterns could get mixed with frame data.
+	 * Also, when exiting PSR the HW retrains the link anyway, fixing any
+	 * link status error.
+	 */
+	if (intel_psr_enabled(intel_dp))
+		return false;
+
 	if (!intel_dp_get_link_status(intel_dp, link_status))
 		return false;
 
@@ -4403,7 +4644,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 
 	/* Suppress underruns caused by re-training */
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-	if (crtc->config->has_pch_encoder)
+	if (crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv,
 						      intel_crtc_pch_transcoder(crtc), false);
 
@@ -4414,7 +4655,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 	intel_wait_for_vblank(dev_priv, crtc->pipe);
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-	if (crtc->config->has_pch_encoder)
+	if (crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv,
 						      intel_crtc_pch_transcoder(crtc), true);
 
@@ -4462,6 +4703,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
 	return changed;
 }
 
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+	u8 val;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+		return;
+
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+	if (val & DP_AUTOMATED_TEST_REQUEST)
+		intel_dp_handle_test_request(intel_dp);
+
+	if (val & DP_CP_IRQ)
+		intel_hdcp_check_link(intel_dp->attached_connector);
+
+	if (val & DP_SINK_SPECIFIC_IRQ)
+		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -4479,7 +4743,6 @@ static bool
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u8 sink_irq_vector = 0;
 	u8 old_sink_count = intel_dp->sink_count;
 	bool ret;
 
@@ -4502,20 +4765,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	/* Try to read the source of the interrupt */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-	    sink_irq_vector != 0) {
-		/* Clear interrupt source */
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				   DP_DEVICE_SERVICE_IRQ_VECTOR,
-				   sink_irq_vector);
-
-		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-			intel_dp_handle_test_request(intel_dp);
-		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-	}
+	intel_dp_check_service_irq(intel_dp);
 
 	/* Handle CEC interrupts, if any */
 	drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +5060,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
 			      type_str);
 }
 
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+				  struct intel_digital_port *dig_port);
+
 /*
  * This function implements the first part of the Connect Flow described by our
  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +5117,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
 	if (dig_port->tc_type == TC_PORT_TYPEC &&
 	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
 		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
-		val = I915_READ(PORT_TX_DFLEXDPCSSS);
-		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
-		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+		icl_tc_phy_disconnect(dev_priv, dig_port);
 		return false;
 	}
 
@@ -4881,21 +5132,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
 				  struct intel_digital_port *dig_port)
 {
 	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	u32 val;
 
-	if (dig_port->tc_type != TC_PORT_LEGACY &&
-	    dig_port->tc_type != TC_PORT_TYPEC)
+	if (dig_port->tc_type == TC_PORT_UNKNOWN)
 		return;
 
 	/*
-	 * This function may be called many times in a row without an HPD event
-	 * in between, so try to avoid the write when we can.
+	 * The TBT disconnection flow only requires reading the live status,
+	 * which the caller has already done.
 	 */
-	val = I915_READ(PORT_TX_DFLEXDPCSSS);
-	if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+	if (dig_port->tc_type == TC_PORT_TYPEC ||
+	    dig_port->tc_type == TC_PORT_LEGACY) {
+		u32 val;
+
+		val = I915_READ(PORT_TX_DFLEXDPCSSS);
 		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
 		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
 	}
+
+	dig_port->tc_type = TC_PORT_UNKNOWN;
 }
 
 /*
@@ -4945,19 +5199,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
-	switch (encoder->hpd_pin) {
-	case HPD_PORT_A:
-	case HPD_PORT_B:
+	if (intel_port_is_combophy(dev_priv, encoder->port))
 		return icl_combo_port_connected(dev_priv, dig_port);
-	case HPD_PORT_C:
-	case HPD_PORT_D:
-	case HPD_PORT_E:
-	case HPD_PORT_F:
+	else if (intel_port_is_tc(dev_priv, encoder->port))
 		return icl_tc_port_connected(dev_priv, dig_port);
-	default:
+	else
 		MISSING_CASE(encoder->hpd_pin);
-		return false;
-	}
+
+	return false;
 }
 
 /*
@@ -4982,20 +5231,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
 			return g4x_digital_port_connected(encoder);
 	}
 
-	if (IS_GEN5(dev_priv))
-		return ilk_digital_port_connected(encoder);
-	else if (IS_GEN6(dev_priv))
-		return snb_digital_port_connected(encoder);
-	else if (IS_GEN7(dev_priv))
-		return ivb_digital_port_connected(encoder);
-	else if (IS_GEN8(dev_priv))
-		return bdw_digital_port_connected(encoder);
+	if (INTEL_GEN(dev_priv) >= 11)
+		return icl_digital_port_connected(encoder);
+	else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+		return spt_digital_port_connected(encoder);
 	else if (IS_GEN9_LP(dev_priv))
 		return bxt_digital_port_connected(encoder);
-	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
-		return spt_digital_port_connected(encoder);
-	else
-		return icl_digital_port_connected(encoder);
+	else if (IS_GEN8(dev_priv))
+		return bdw_digital_port_connected(encoder);
+	else if (IS_GEN7(dev_priv))
+		return ivb_digital_port_connected(encoder);
+	else if (IS_GEN6(dev_priv))
+		return snb_digital_port_connected(encoder);
+	else if (IS_GEN5(dev_priv))
+		return ilk_digital_port_connected(encoder);
+
+	MISSING_CASE(INTEL_GEN(dev_priv));
+	return false;
 }
 
 static struct edid *
@@ -5042,28 +5294,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector,
-		    struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+		struct drm_modeset_acquire_ctx *ctx,
+		bool force)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 	enum drm_connector_status status;
-	u8 sink_irq_vector = 0;
+	enum intel_display_power_domain aux_domain =
+		intel_aux_power_domain(dig_port);
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
 	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, aux_domain);
 
 	/* Can't disconnect eDP */
 	if (intel_dp_is_edp(intel_dp))
 		status = edp_detect(intel_dp);
-	else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+	else if (intel_digital_port_connected(encoder))
 		status = intel_dp_detect_dpcd(intel_dp);
 	else
 		status = connector_status_disconnected;
 
 	if (status == connector_status_disconnected) {
 		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
 
 		if (intel_dp->is_mst) {
 			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5348,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
 
 	intel_dp_print_rates(intel_dp);
 
+	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+	if (INTEL_GEN(dev_priv) >= 11)
+		intel_dp_get_dsc_sink_cap(intel_dp);
+
 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
 			 drm_dp_is_branch(intel_dp->dpcd));
 
@@ -5109,9 +5372,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
 	 * with an IRQ_HPD, so force a link status check.
 	 */
 	if (!intel_dp_is_edp(intel_dp)) {
-		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+		int ret;
 
-		intel_dp_retrain_link(encoder, ctx);
+		ret = intel_dp_retrain_link(encoder, ctx);
+		if (ret) {
+			intel_display_power_put(dev_priv, aux_domain);
+			return ret;
+		}
 	}
 
 	/*
@@ -5123,61 +5390,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
 	intel_dp->aux.i2c_defer_count = 0;
 
 	intel_dp_set_edid(intel_dp);
-	if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+	if (intel_dp_is_edp(intel_dp) ||
+	    to_intel_connector(connector)->detect_edid)
 		status = connector_status_connected;
-	intel_dp->detect_done = true;
 
-	/* Try to read the source of the interrupt */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-	    sink_irq_vector != 0) {
-		/* Clear interrupt source */
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				   DP_DEVICE_SERVICE_IRQ_VECTOR,
-				   sink_irq_vector);
-
-		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-			intel_dp_handle_test_request(intel_dp);
-		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-	}
+	intel_dp_check_service_irq(intel_dp);
 
 out:
 	if (status != connector_status_connected && !intel_dp->is_mst)
 		intel_dp_unset_edid(intel_dp);
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-	return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
-		struct drm_modeset_acquire_ctx *ctx,
-		bool force)
-{
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int status = connector->status;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
-
-	/* If full detect is not performed yet, do a full detect */
-	if (!intel_dp->detect_done) {
-		struct drm_crtc *crtc;
-		int ret;
-
-		crtc = connector->state->crtc;
-		if (crtc) {
-			ret = drm_modeset_lock(&crtc->mutex, ctx);
-			if (ret)
-				return ret;
-		}
-
-		status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
-	}
-
-	intel_dp->detect_done = false;
-
+	intel_display_power_put(dev_priv, aux_domain);
 	return status;
 }
 
@@ -5185,8 +5408,11 @@ static void
 intel_dp_force(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &dig_port->base;
 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	enum intel_display_power_domain aux_domain =
+		intel_aux_power_domain(dig_port);
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
@@ -5195,11 +5421,11 @@ intel_dp_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, aux_domain);
 
 	intel_dp_set_edid(intel_dp);
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv, aux_domain);
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5490,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
 	intel_connector_unregister(connector);
 }
 
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	kfree(intel_connector->detect_edid);
-
-	if (!IS_ERR_OR_NULL(intel_connector->edid))
-		kfree(intel_connector->edid);
-
-	/*
-	 * Can't call intel_dp_is_edp() since the encoder may have been
-	 * destroyed already.
-	 */
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		intel_panel_fini(&intel_connector->panel);
-
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5553,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
 				     an, DRM_HDCP_AN_LEN);
 	if (dpcd_ret != DRM_HDCP_AN_LEN) {
-		DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+			      dpcd_ret);
 		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
 	}
 
@@ -5364,10 +5570,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 				rxbuf, sizeof(rxbuf),
 				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
 	if (ret < 0) {
-		DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
 		return ret;
 	} else if (ret == 0) {
-		DRM_ERROR("Aksv write over DP/AUX was empty\n");
+		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
 		return -EIO;
 	}
 
@@ -5382,7 +5588,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
 			       DRM_HDCP_KSV_LEN);
 	if (ret != DRM_HDCP_KSV_LEN) {
-		DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5400,7 +5606,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
 			       bstatus, DRM_HDCP_BSTATUS_LEN);
 	if (ret != DRM_HDCP_BSTATUS_LEN) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5415,7 +5621,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
 			       bcaps, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 
@@ -5445,7 +5651,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
 			       ri_prime, DRM_HDCP_RI_LEN);
 	if (ret != DRM_HDCP_RI_LEN) {
-		DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5460,7 +5666,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5688,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
 				       len);
 		if (ret != len) {
-			DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
-				  ret);
+			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+				      i, ret);
 			return ret >= 0 ? -EIO : ret;
 		}
 	}
@@ -5503,7 +5709,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 			       DP_AUX_HDCP_V_PRIME(i), part,
 			       DRM_HDCP_V_PRIME_PART_LEN);
 	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
-		DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5526,7 +5732,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return false;
 	}
 
@@ -5565,6 +5771,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5578,7 +5785,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * indefinitely.
 	 */
 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
 
 	edp_panel_vdd_schedule_off(intel_dp);
 }
@@ -5631,7 +5838,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
 	.late_register = intel_dp_connector_register,
 	.early_unregister = intel_dp_connector_unregister,
-	.destroy = intel_dp_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -5673,11 +5880,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 	if (long_hpd) {
 		intel_dp->reset_link_params = true;
-		intel_dp->detect_done = false;
 		return IRQ_NONE;
 	}
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	if (intel_dp->is_mst) {
 		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5897,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 			intel_dp->is_mst = false;
 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 							intel_dp->is_mst);
-			intel_dp->detect_done = false;
 			goto put_power;
 		}
 	}
@@ -5700,19 +5906,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 		handled = intel_dp_short_pulse(intel_dp);
 
-		/* Short pulse can signify loss of hdcp authentication */
-		intel_hdcp_check_link(intel_dp->attached_connector);
-
-		if (!handled) {
-			intel_dp->detect_done = false;
+		if (!handled)
 			goto put_power;
-		}
 	}
 
 	ret = IRQ_HANDLED;
 
 put_power:
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	return ret;
 }
@@ -5743,6 +5945,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 		intel_attach_force_audio_property(connector);
 
 	intel_attach_broadcast_rgb_property(connector);
+	if (HAS_GMCH_DISPLAY(dev_priv))
+		drm_connector_attach_max_bpc_property(connector, 6, 10);
+	else if (INTEL_GEN(dev_priv) >= 5)
+		drm_connector_attach_max_bpc_property(connector, 6, 12);
 
 	if (intel_dp_is_edp(intel_dp)) {
 		u32 allowed_scalers;
@@ -6099,10 +6305,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
 		switch (index) {
 		case DRRS_HIGH_RR:
-			intel_dp_set_m_n(intel_crtc, M1_N1);
+			intel_dp_set_m_n(crtc_state, M1_N1);
 			break;
 		case DRRS_LOW_RR:
-			intel_dp_set_m_n(intel_crtc, M2_N2);
+			intel_dp_set_m_n(crtc_state, M2_N2);
 			break;
 		case DRRS_MAX_RR:
 		default:
@@ -6422,6 +6628,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!intel_dp_is_edp(intel_dp))
 		return true;
 
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
 	/*
 	 * On IBX/CPT we may get here with LVDS already registered. Since the
 	 * driver uses the only internal power sequencer available for both
@@ -6514,6 +6722,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
 	intel_panel_setup_backlight(connector, pipe);
 
+	if (fixed_mode)
+		drm_connector_init_panel_orientation_property(
+			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
 	return true;
 
 out_vdd_off:
@@ -6624,9 +6836,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
 	intel_dp_aux_init(intel_dp);
 
-	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-			  edp_panel_vdd_work);
-
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
 	if (HAS_DDI(dev_priv))
@@ -6743,6 +6952,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 	if (port != PORT_A)
 		intel_infoframe_init(intel_dig_port);
 
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
 		goto err_init_connector;
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a911691..4de247d 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->has_pch_encoder = false;
 	bpp = 24;
 	if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-	if (intel_dp->active_mst_links == 0 &&
-	    intel_dig_port->base.pre_pll_enable)
+	if (intel_dp->active_mst_links == 0)
 		intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
 						    pipe_config, NULL);
 }
 
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+					  const struct intel_crtc_state *old_crtc_state,
+					  const struct drm_connector_state *old_conn_state)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	if (intel_dp->active_mst_links == 0)
+		intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+						      old_crtc_state,
+						      old_conn_state);
+}
+
 static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *pipe_config,
 				    const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
 				      intel_connector->port);
 }
 
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	if (!IS_ERR_OR_NULL(intel_connector->edid))
-		kfree(intel_connector->edid);
-
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dp_mst_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 	intel_encoder->disable = intel_mst_disable_dp;
 	intel_encoder->post_disable = intel_mst_post_disable_dp;
 	intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+	intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
 	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
 	intel_encoder->enable = intel_mst_enable_dp;
 	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab6..3c7f10d 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
-	if (crtc->config->lane_count > 2) {
+	if (crtc_state->lane_count > 2) {
 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
 		if (reset)
 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 		val |= DPIO_PCS_CLK_SOFT_RESET;
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
-	if (crtc->config->lane_count > 2) {
+	if (crtc_state->lane_count > 2) {
 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
 		val |= CHV_PCS_REQ_SOFTRESET_EN;
 		if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac92..d513ca8 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 
 /**
  * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: state of the CRTC which has a shared dpll
  *
  * This calls the PLL's prepare hook if it has one and if the PLL is not
  * already enabled. The prepare hook is platform specific.
  */
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
 	if (WARN_ON(pll == NULL))
 		return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 
 /**
  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: state of the CRTC which has a shared DPLL
  *
  * Enable the shared DPLL used by @crtc.
  */
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 	unsigned int old_mask;
 
@@ -199,14 +199,15 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 
 /**
  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: state of the CRTC which has a shared DPLL
  *
  * Disable the shared DPLL used by @crtc.
  */
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 
 	/* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
 	const enum intel_dpll_id id = pll->info->id;
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
-
-	/* Make sure no transcoder isn't still depending on us. */
-	for_each_intel_crtc(dev, crtc) {
-		if (crtc->config->shared_dpll == pll)
-			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
-	}
 
 	I915_WRITE(PCH_DPLL(id), 0);
 	POSTING_READ(PCH_DPLL(id));
@@ -2530,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 
 	if (intel_port_is_tc(dev_priv, encoder->port))
 		ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
-	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 		ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
 	else
 		ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2628,11 +2622,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
 	return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
 }
 
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
 {
 	return port - PORT_C + DPLL_ID_ICL_MGPLL1;
 }
 
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+	return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
 				     uint32_t *target_dco_khz,
 				     struct intel_dpll_hw_state *state)
@@ -2874,8 +2873,8 @@ static struct intel_shared_dpll *
 icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct intel_digital_port *intel_dig_port =
-			enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *intel_dig_port;
 	struct intel_shared_dpll *pll;
 	struct intel_dpll_hw_state pll_state = {};
 	enum port port = encoder->port;
@@ -2883,18 +2882,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	int clock = crtc_state->port_clock;
 	bool ret;
 
-	switch (port) {
-	case PORT_A:
-	case PORT_B:
+	if (intel_port_is_combophy(dev_priv, port)) {
 		min = DPLL_ID_ICL_DPLL0;
 		max = DPLL_ID_ICL_DPLL1;
 		ret = icl_calc_dpll_state(crtc_state, encoder, clock,
 					  &pll_state);
-		break;
-	case PORT_C:
-	case PORT_D:
-	case PORT_E:
-	case PORT_F:
+	} else if (intel_port_is_tc(dev_priv, port)) {
+		if (encoder->type == INTEL_OUTPUT_DP_MST) {
+			struct intel_dp_mst_encoder *mst_encoder;
+
+			mst_encoder = enc_to_mst(&encoder->base);
+			intel_dig_port = mst_encoder->primary;
+		} else {
+			intel_dig_port = enc_to_dig_port(&encoder->base);
+		}
+
 		if (intel_dig_port->tc_type == TC_PORT_TBT) {
 			min = DPLL_ID_ICL_TBTPLL;
 			max = min;
@@ -2906,8 +2908,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 			ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
 						    &pll_state);
 		}
-		break;
-	default:
+	} else {
 		MISSING_CASE(port);
 		return NULL;
 	}
@@ -2932,21 +2933,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
 static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
 {
-	switch (id) {
-	default:
-		MISSING_CASE(id);
-		/* fall through */
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
+	if (intel_dpll_is_combophy(id))
 		return CNL_DPLL_ENABLE(id);
-	case DPLL_ID_ICL_TBTPLL:
+	else if (id == DPLL_ID_ICL_TBTPLL)
 		return TBT_PLL_ENABLE;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	else
+		/*
+		 * TODO: Make MG_PLL macros use
+		 * tc port id instead of port id
+		 */
 		return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
-	}
 }
 
 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2961,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	switch (id) {
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
-	case DPLL_ID_ICL_TBTPLL:
+	if (intel_dpll_is_combophy(id) ||
+	    id == DPLL_ID_ICL_TBTPLL) {
 		hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
 		hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
-		break;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	} else {
 		port = icl_mg_pll_id_to_port(id);
 		hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
 		hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3003,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 		hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
-		break;
-	default:
-		MISSING_CASE(id);
 	}
 
 	ret = true;
@@ -3104,21 +3091,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 				    PLL_POWER_STATE, 1))
 		DRM_ERROR("PLL %d Power not enabled\n", id);
 
-	switch (id) {
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
-	case DPLL_ID_ICL_TBTPLL:
+	if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
 		icl_dpll_write(dev_priv, pll);
-		break;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	else
 		icl_mg_pll_write(dev_priv, pll);
-		break;
-	default:
-		MISSING_CASE(id);
-	}
 
 	/*
 	 * DVFS pre sequence would be here, but in our driver the cdclk code
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a..a033d8f 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
 			       struct intel_crtc *crtc,
 			       struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
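With the prototype change above, callers derive the crtc and device from
the state itself. A minimal sketch of the new calling convention (hedged;
the real call sites live in intel_display.c, outside this diff):

	/* enable path: the state being committed */
	intel_prepare_shared_dpll(pipe_config);
	intel_enable_shared_dpll(pipe_config);

	/* disable path: the state being torn down */
	intel_disable_shared_dpll(old_crtc_state);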
 
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
 			       uint32_t pll_id);
 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index db6fa1d..f94a04b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
 			    bool *hdcp_capable);
 };
 
+struct intel_hdcp {
+	const struct intel_hdcp_shim *shim;
+	/* Mutex for hdcp state of the connector */
+	struct mutex mutex;
+	u64 value;
+	struct delayed_work check_work;
+	struct work_struct prop_work;
+};
+
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -413,11 +422,7 @@ struct intel_connector {
 	/* Work struct to schedule a uevent on link train failure */
 	struct work_struct modeset_retry_work;
 
-	const struct intel_hdcp_shim *hdcp_shim;
-	struct mutex hdcp_mutex;
-	uint64_t hdcp_value; /* protected by hdcp_mutex */
-	struct delayed_work hdcp_check_work;
-	struct work_struct hdcp_prop_work;
+	struct intel_hdcp hdcp;
 };
 
 struct intel_digital_connector_state {
@@ -539,6 +544,26 @@ struct intel_plane_state {
 	 */
 	int scaler_id;
 
+	/*
+	 * linked_plane:
+	 *
+	 * ICL planar formats require 2 planes that are updated as pairs.
+	 * This member is used to make sure the other plane is also updated
+	 * when required, and for update_slave() to find the correct
+	 * plane_state to pass as an argument.
+	 */
+	struct intel_plane *linked_plane;
+
+	/*
+	 * slave:
+	 * If set, don't update this plane directly; instead use the linked
+	 * plane's state to update it during the atomic commit, via the
+	 * update_slave() callback.
+	 *
+	 * It's also used by the watermark code to skip wm calculations for
+	 * this plane; those are done as part of the linked plane's wm code.
+	 */
+	u32 slave;
+
 	struct drm_intel_sprite_colorkey ckey;
 };
 
@@ -681,6 +706,8 @@ struct intel_crtc_wm_state {
 			/* gen9+ only needs 1-step wm programming */
 			struct skl_pipe_wm optimal;
 			struct skl_ddb_entry ddb;
+			struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+			struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
 		} skl;
 
 		struct {
@@ -713,6 +740,13 @@ struct intel_crtc_wm_state {
 	bool need_postvbl_update;
 };
 
+enum intel_output_format {
+	INTEL_OUTPUT_FORMAT_INVALID,
+	INTEL_OUTPUT_FORMAT_RGB,
+	INTEL_OUTPUT_FORMAT_YCBCR420,
+	INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
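This enum replaces the single ycbcr420 flag (see the intel_crtc_state
change below), so the state can also describe 4:4:4 output, e.g. for LSPCON
downsampling. A minimal sketch of the kind of mapping a compute_config
path might perform; the helper name is hypothetical:

	static enum intel_output_format
	pick_output_format(bool ycbcr420_required)
	{
		return ycbcr420_required ? INTEL_OUTPUT_FORMAT_YCBCR420
					 : INTEL_OUTPUT_FORMAT_RGB;
	}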
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -894,14 +928,32 @@ struct intel_crtc_state {
 	u8 active_planes;
 	u8 nv12_planes;
 
+	/* bitmask of planes that will be updated during the commit */
+	u8 update_planes;
+
 	/* HDMI scrambling status */
 	bool hdmi_scrambling;
 
 	/* HDMI High TMDS char rate ratio */
 	bool hdmi_high_tmds_clock_ratio;
 
-	/* output format is YCBCR 4:2:0 */
-	bool ycbcr420;
+	/* Output format RGB/YCBCR etc */
+	enum intel_output_format output_format;
+
+	/* Output downscaling is done in the LSPCON device */
+	bool lspcon_downsampling;
+
+	/* Display Stream compression state */
+	struct {
+		bool compression_enable;
+		bool dsc_split;
+		u16 compressed_bpp;
+		u8 slice_count;
+	} dsc_params;
+	struct drm_dsc_config dp_dsc_cfg;
+
+	/* Forward Error Correction state */
+	bool fec_enable;
 };
 
 struct intel_crtc {
@@ -974,8 +1026,11 @@ struct intel_plane {
 	void (*update_plane)(struct intel_plane *plane,
 			     const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state);
+	void (*update_slave)(struct intel_plane *plane,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct intel_plane *plane,
-			      struct intel_crtc *crtc);
+			      const struct intel_crtc_state *crtc_state);
 	bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
 	int (*check_plane)(struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *plane_state);
@@ -1071,13 +1126,13 @@ struct intel_dp {
 	bool link_mst;
 	bool link_trained;
 	bool has_audio;
-	bool detect_done;
 	bool reset_link_params;
-	enum aux_ch aux_ch;
 	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
 	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
 	uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+	u8 fec_capable;
 	/* source rates */
 	int num_source_rates;
 	const int *source_rates;
@@ -1095,7 +1150,6 @@ struct intel_dp {
 	/* sink or branch descriptor */
 	struct drm_dp_desc desc;
 	struct drm_dp_aux aux;
-	enum intel_display_power_domain aux_power_domain;
 	uint8_t train_set[4];
 	int panel_power_up_delay;
 	int panel_power_down_delay;
@@ -1157,9 +1211,15 @@ struct intel_dp {
 	struct intel_dp_compliance compliance;
 };
 
+enum lspcon_vendor {
+	LSPCON_VENDOR_MCA,
+	LSPCON_VENDOR_PARADE
+};
+
 struct intel_lspcon {
 	bool active;
 	enum drm_lspcon_mode mode;
+	enum lspcon_vendor vendor;
 };
 
 struct intel_digital_port {
@@ -1171,18 +1231,20 @@ struct intel_digital_port {
 	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
 	bool release_cl2_override;
 	uint8_t max_lanes;
+	/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+	enum aux_ch aux_ch;
 	enum intel_display_power_domain ddi_io_power_domain;
 	enum tc_port_type tc_type;
 
-	void (*write_infoframe)(struct drm_encoder *encoder,
+	void (*write_infoframe)(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len);
-	void (*set_infoframes)(struct drm_encoder *encoder,
+	void (*set_infoframes)(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state);
-	bool (*infoframe_enabled)(struct drm_encoder *encoder,
+	bool (*infoframe_enabled)(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config);
 };
 
@@ -1282,6 +1344,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
 		return NULL;
 }
 
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
 static inline struct intel_dp_mst_encoder *
 enc_to_mst(struct drm_encoder *encoder)
 {
@@ -1307,6 +1375,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
 	}
 }
 
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->lspcon;
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {
@@ -1332,6 +1406,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 }
 
 static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+				 struct intel_plane *plane)
+{
+	struct drm_plane_state *ret =
+		drm_atomic_get_plane_state(&state->base, &plane->base);
+
+	if (IS_ERR(ret))
+		return ERR_CAST(ret);
+
+	return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+				 struct intel_plane *plane)
+{
+	return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+								   &plane->base));
+}
+
+static inline struct intel_plane_state *
 intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
 				 struct intel_plane *plane)
 {
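A hedged usage sketch for the three accessors above; note that only
intel_atomic_get_plane_state() can allocate (and thus fail), while the
old/new variants are pure lookups:

	struct intel_plane_state *plane_state, *old_state, *new_state;

	/* may allocate the plane state and acquire its lock */
	plane_state = intel_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* pure lookups; valid once the plane is part of the atomic state */
	old_state = intel_atomic_get_old_plane_state(state, plane);
	new_state = intel_atomic_get_new_plane_state(state, plane);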
@@ -1439,12 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
 				 u8 voltage_swing);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 				     bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
-			   struct intel_crtc_state *crtc_state,
-			   struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
-			     struct intel_crtc_state *crtc_state,
-			     struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			enum intel_dpll_id pll_id);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
 				   int color_plane, unsigned int height);
@@ -1489,7 +1581,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
@@ -1510,20 +1601,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
 int intel_display_suspend(struct drm_device *dev);
 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
-				    struct intel_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
 			      enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1629,9 +1712,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+		      enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			struct dpll *best_clock);
@@ -1642,6 +1727,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -1671,6 +1758,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
 				   u32 pixel_format, u64 modifier,
 				   unsigned int rotation);
 
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+				 struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
 void intel_csr_load_program(struct drm_i915_private *);
@@ -1696,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 int intel_dp_retrain_link(struct intel_encoder *encoder,
 			  struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state,
+					   bool enable);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1729,9 +1837,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 			       unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1749,6 +1854,16 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+				     int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+				     int mode_hdisplay);
+
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
 
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -1769,6 +1884,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* vlv_dsi.c */
 void vlv_dsi_init(struct drm_i915_private *dev_priv);
 
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
 /* intel_dsi_dcs_backlight.c */
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
@@ -1859,7 +1977,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
 
-
 /* intel_lvds.c */
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
 			     i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1867,19 +1984,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
-				 struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
 int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
@@ -1908,7 +2015,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
 void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
 extern struct drm_display_mode *intel_find_panel_downclock(
 				struct drm_i915_private *dev_priv,
 				struct drm_display_mode *fixed_mode,
@@ -1937,6 +2043,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
 int intel_hdcp_disable(struct intel_connector *connector);
 int intel_hdcp_check_link(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1962,12 +2069,18 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
 void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_power_domains_enable(struct drm_i915_private *dev_priv);
 void intel_power_domains_disable(struct drm_i915_private *dev_priv);
 
@@ -2091,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2102,10 +2218,13 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-				 const struct skl_ddb_entry **entries,
-				 const struct skl_ddb_entry *ddb,
-				 int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+				 const struct skl_ddb_entry entries[],
+				 int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *cstate);
@@ -2128,23 +2247,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
-		      const struct intel_crtc_state *crtc_state,
-		      const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-		       enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-			  enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
-				  u32 pixel_format, u64 modifier,
-				  unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-		    struct intel_plane_state *plane_state);
 int intel_plane_check_stride(const struct intel_plane_state *plane_state);
 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
 int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+	/* Don't need to do a gen check, these planes are only available on gen11 */
+	if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+		return true;
+
+	return false;
+}
+
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+	if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+		return false;
+
+	return plane->id < PLANE_SPRITE2;
+}
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2186,11 +2311,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			       struct intel_crtc_state *crtc_state);
 
 /* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
 struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
 void intel_plane_destroy_state(struct drm_plane *plane,
 			       struct drm_plane_state *state);
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+				struct intel_crtc *crtc);
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
 					struct intel_crtc_state *crtc_state,
 					const struct intel_plane_state *old_plane_state,
@@ -2206,6 +2336,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    unsigned int type,
+			    const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+			   bool enable,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+			    struct intel_crtc_state *crtc_state);
 
 /* intel_pipe_crc.c */
 #ifdef CONFIG_DEBUG_FS
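
A minimal standalone sketch of the interval test implied by the new
skl_ddb_allocation_overlaps() prototype above: each DDB entry is a
[start, end) range and one index (the entry being recomputed) may be
skipped. The struct layout and helper here are assumptions for
illustration, not the i915 definitions.

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned int start, end; };

static bool ddb_overlaps(const struct ddb_entry *ddb,
			 const struct ddb_entry entries[],
			 int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (i == ignore_idx)
			continue;
		/* half-open ranges overlap iff each one starts
		 * before the other one ends */
		if (ddb->start < entries[i].end &&
		    entries[i].start < ddb->end)
			return true;
	}
	return false;
}

int main(void)
{
	struct ddb_entry pipes[] = { { 0, 256 }, { 256, 512 } };
	struct ddb_entry mine = { 200, 300 };

	/* ignoring our own slot 0, we still collide with slot 1 */
	printf("%d\n", ddb_overlaps(&mine, pipes, 2, 0));
	return 0;
}
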
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 0000000..5fec02a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+	if (WARN_ON(bpp < 0))
+		bpp = 16;
+
+	return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+	switch (intel_dsi->escape_clk_div) {
+	default:
+	case 0:
+		return 50;
+	case 1:
+		return 100;
+	case 2:
+		return 200;
+	}
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+		if (fixed_mode->clock > max_dotclk)
+			return MODE_CLOCK_HIGH;
+	}
+
+	return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+					   const struct mipi_dsi_host_ops *funcs,
+					   enum port port)
+{
+	struct intel_dsi_host *host;
+	struct mipi_dsi_device *device;
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return NULL;
+
+	host->base.ops = funcs;
+	host->intel_dsi = intel_dsi;
+	host->port = port;
+
+	/*
+	 * We should call mipi_dsi_host_register(&host->base) here, but we don't
+	 * have a host->dev, and we don't have OF stuff either. So just use the
+	 * dsi framework as a library and hope for the best. Create the dsi
+	 * devices by ourselves here too. Need to be careful though, because we
+	 * don't initialize any of the driver model devices here.
+	 */
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		kfree(host);
+		return NULL;
+	}
+
+	device->host = &host->base;
+	host->device = device;
+
+	return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum drm_panel_orientation orientation;
+
+	orientation = dev_priv->vbt.dsi.orientation;
+	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		return orientation;
+
+	orientation = dev_priv->vbt.orientation;
+	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		return orientation;
+
+	return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
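
To make the arithmetic in intel_dsi_bitrate() above concrete, here is a
standalone sketch. The panel parameters are hypothetical; since the
pixel clock is carried in kHz, the result is a per-lane rate in kbps.

#include <stdio.h>

int main(void)
{
	/* hypothetical panel, not taken from any real VBT */
	int pclk = 148500;	/* pixel clock in kHz */
	int bpp = 24;		/* RGB888 */
	int lane_count = 4;

	/* mirrors intel_dsi_bitrate(): per-lane link rate in kbps */
	int bitrate = pclk * bpp / lane_count;

	printf("%d kbps per lane\n", bitrate);	/* 891000 */
	return 0;
}
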
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb..d968f1f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
 	u16 dcs_backlight_ports;
 	u16 dcs_cabc_ports;
 
+	/* RGB or BGR */
+	bool bgr_enabled;
+
 	u8 pixel_overlap;
 	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
+
+	/* data lanes dphy timing */
+	u32 dphy_data_lane_reg;
 	u32 video_frmt_cfg_bits;
 	u16 lp_byte_clk;
 
 	/* timeouts in byte clocks */
+	u16 hs_tx_timeout;
 	u16 lp_rx_timeout;
 	u16 turn_arnd_val;
 	u16 rst_timer_val;
@@ -129,9 +136,36 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 	return container_of(encoder, struct intel_dsi, base.base);
 }
 
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+	return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
 /* vlv_dsi.c */
 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+					   const struct mipi_dsi_host_ops *funcs,
+					   enum port port);
 
 /* vlv_dsi_pll.c */
 int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +192,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
 
 #endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b..a1a8b37 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
 #define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)
 #define  CHV_GPIO_CFGLOCK		(1 << 31)
 
+/* ICL DSI Display GPIO Pins */
+#define  ICL_GPIO_DDSP_HPD_A		0
+#define  ICL_GPIO_L_VDDEN_1		1
+#define  ICL_GPIO_L_BKLTEN_1		2
+#define  ICL_GPIO_DDPA_CTRLCLK_1	3
+#define  ICL_GPIO_DDPA_CTRLDATA_1	4
+#define  ICL_GPIO_DDSP_HPD_B		5
+#define  ICL_GPIO_L_VDDEN_2		6
+#define  ICL_GPIO_L_BKLTEN_2		7
+#define  ICL_GPIO_DDPA_CTRLCLK_2	8
+#define  ICL_GPIO_DDPA_CTRLDATA_2	9
+
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
 {
 	return port ? PORT_C : PORT_A;
@@ -111,6 +123,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
 static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 				       const u8 *data)
 {
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
 	struct mipi_dsi_device *dsi_device;
 	u8 type, flags, seq_port;
 	u16 len;
@@ -181,7 +194,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 		break;
 	}
 
-	vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+	if (!IS_ICELAKE(dev_priv))
+		vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 out:
 	data += len;
@@ -322,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 	gpiod_set_value(gpio_desc, value);
 }
 
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
@@ -345,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	/* pull up/down */
 	value = *data++ & 1;
 
-	if (IS_VALLEYVIEW(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+	else if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
 	else if (IS_CHERRYVIEW(dev_priv))
 		chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -481,6 +503,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 	}
 }
 
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+	if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+		return;
+
+	msleep(msec);
+}
+
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 {
 	struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +532,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 	return 1;
 }
 
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX	0x7
+#define ICL_CLK_ZERO_CNT_MAX	0xf
+#define ICL_TRAIL_CNT_MAX	0x7
+#define ICL_TCLK_PRE_CNT_MAX	0x3
+#define ICL_TCLK_POST_CNT_MAX	0x7
+#define ICL_HS_ZERO_CNT_MAX	0xf
+#define ICL_EXIT_ZERO_CNT_MAX	0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
-	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
-	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
-	u32 bpp;
-	u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
+	u32 tlpx_ns;
+	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+	u32 ths_prepare_ns, tclk_trail_ns;
+	u32 hs_zero_cnt;
+	u32 tclk_pre_cnt, tclk_post_cnt;
+
+	tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+	tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+	ths_prepare_ns = max(mipi_config->ths_prepare,
+			     mipi_config->tclk_prepare);
+
+	/*
+	 * prepare cnt in escape clocks
+	 * this field is a fixed-point value in 1.2 format, i.e. the
+	 * most significant bit is the integer part and the least
+	 * significant 2 bits are fraction bits, so the field can
+	 * represent a range of 0.25 to 1.75
+	 */
+	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+	if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+		DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+		prepare_cnt = ICL_PREPARE_CNT_MAX;
+	}
+
+	/* clk zero count in escape clocks */
+	clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+				    ths_prepare_ns, tlpx_ns);
+	if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+		clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
+	}
+
+	/* trail cnt in escape clocks */
+	trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+	if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+		DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+		trail_cnt = ICL_TRAIL_CNT_MAX;
+	}
+
+	/* tclk pre count in escape clocks */
+	tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+	if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+		DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+		tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+	}
+
+	/* tclk post count in escape clocks */
+	tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+	if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+		DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+		tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+	}
+
+	/* hs zero cnt in escape clocks */
+	hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+				   ths_prepare_ns, tlpx_ns);
+	if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+		hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+	}
+
+	/* hs exit zero cnt in escape clocks */
+	exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+	if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+		exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+	}
+
+	/* clock lane dphy timings */
+	intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+			       CLK_PREPARE(prepare_cnt) |
+			       CLK_ZERO_OVERRIDE |
+			       CLK_ZERO(clk_zero_cnt) |
+			       CLK_PRE_OVERRIDE |
+			       CLK_PRE(tclk_pre_cnt) |
+			       CLK_POST_OVERRIDE |
+			       CLK_POST(tclk_post_cnt) |
+			       CLK_TRAIL_OVERRIDE |
+			       CLK_TRAIL(trail_cnt));
+
+	/* data lanes dphy timings */
+	intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+					 HS_PREPARE(prepare_cnt) |
+					 HS_ZERO_OVERRIDE |
+					 HS_ZERO(hs_zero_cnt) |
+					 HS_TRAIL_OVERRIDE |
+					 HS_TRAIL(trail_cnt) |
+					 HS_EXIT_OVERRIDE |
+					 HS_EXIT(exit_zero_cnt));
+}
+
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	u32 tlpx_ns, extra_byte_count, tlpx_ui;
 	u32 ui_num, ui_den;
 	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
 	u32 ths_prepare_ns, tclk_trail_ns;
 	u32 tclk_prepare_clkzero, ths_prepare_hszero;
 	u32 lp_to_hs_switch, hs_to_lp_switch;
-	u32 pclk, computed_ddr;
 	u32 mul;
-	u16 burst_mode_ratio;
-	enum port port;
 
-	DRM_DEBUG_KMS("\n");
-
-	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
-	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
-	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
-	intel_dsi->pixel_format =
-			pixel_format_from_register_bits(
-				mipi_config->videomode_color_format << 7);
-	bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
-
-	intel_dsi->dual_link = mipi_config->dual_link;
-	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
-	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
-	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
-	intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
-	intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
-	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
-	intel_dsi->init_count = mipi_config->master_init_timer;
-	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
-	intel_dsi->video_frmt_cfg_bits =
-		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
-
-	pclk = mode->clock;
-
-	/* In dual link mode each port needs half of pixel clock */
-	if (intel_dsi->dual_link) {
-		pclk = pclk / 2;
-
-		/* we can enable pixel_overlap if needed by panel. In this
-		 * case we need to increase the pixelclock for extra pixels
-		 */
-		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
-			pclk += DIV_ROUND_UP(mode->vtotal *
-						intel_dsi->pixel_overlap *
-						60, 1000);
-		}
-	}
-
-	/* Burst Mode Ratio
-	 * Target ddr frequency from VBT / non burst ddr freq
-	 * multiply by 100 to preserve remainder
-	 */
-	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
-		if (mipi_config->target_burst_mode_freq) {
-			computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
-
-			if (mipi_config->target_burst_mode_freq <
-								computed_ddr) {
-				DRM_ERROR("Burst mode freq is less than computed\n");
-				return false;
-			}
-
-			burst_mode_ratio = DIV_ROUND_UP(
-				mipi_config->target_burst_mode_freq * 100,
-				computed_ddr);
-
-			pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
-		} else {
-			DRM_ERROR("Burst mode target is not set\n");
-			return false;
-		}
-	} else
-		burst_mode_ratio = 100;
-
-	intel_dsi->burst_mode_ratio = burst_mode_ratio;
-	intel_dsi->pclk = pclk;
-
-	bitrate = (pclk * bpp) / intel_dsi->lane_count;
-
-	switch (intel_dsi->escape_clk_div) {
-	case 0:
-		tlpx_ns = 50;
-		break;
-	case 1:
-		tlpx_ns = 100;
-		break;
-
-	case 2:
-		tlpx_ns = 200;
-		break;
-	default:
-		tlpx_ns = 50;
-		break;
-	}
+	tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
 	switch (intel_dsi->lane_count) {
 	case 1:
@@ -620,7 +668,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 	/* in Kbps */
 	ui_num = NS_KHZ_RATIO;
-	ui_den = bitrate;
+	ui_den = intel_dsi_bitrate(intel_dsi);
 
 	tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
 	ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +794,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 		DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
 			8);
 	intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+	u16 burst_mode_ratio;
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+	intel_dsi->pixel_format =
+			pixel_format_from_register_bits(
+				mipi_config->videomode_color_format << 7);
+
+	intel_dsi->dual_link = mipi_config->dual_link;
+	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+	intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+	intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+	intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+	intel_dsi->init_count = mipi_config->master_init_timer;
+	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+	intel_dsi->video_frmt_cfg_bits =
+		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+	intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+	/* Starting point, adjusted depending on dual link and burst mode */
+	intel_dsi->pclk = mode->clock;
+
+	/* In dual link mode each port needs half of pixel clock */
+	if (intel_dsi->dual_link) {
+		intel_dsi->pclk /= 2;
+
+		/* we can enable pixel_overlap if needed by the panel; in
+		 * that case the pixel clock is increased for the extra pixels
+		 */
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+			intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+		}
+	}
+
+	/* Burst Mode Ratio
+	 * Target ddr frequency from VBT / non burst ddr freq
+	 * multiply by 100 to preserve remainder
+	 */
+	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		if (mipi_config->target_burst_mode_freq) {
+			u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+			if (mipi_config->target_burst_mode_freq < bitrate) {
+				DRM_ERROR("Burst mode freq is less than computed\n");
+				return false;
+			}
+
+			burst_mode_ratio = DIV_ROUND_UP(
+				mipi_config->target_burst_mode_freq * 100,
+				bitrate);
+
+			intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+		} else {
+			DRM_ERROR("Burst mode target is not set\n");
+			return false;
+		}
+	} else
+		burst_mode_ratio = 100;
+
+	intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+	if (IS_ICELAKE(dev_priv))
+		icl_dphy_param_init(intel_dsi);
+	else
+		vlv_dphy_param_init(intel_dsi);
 
 	DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
 	DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff..0042a7f 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	return true;
 }
 
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-	intel_panel_fini(&to_intel_connector(connector)->panel);
-	kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 	.detect = intel_dvo_detect,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dvo_destroy,
+	.destroy = intel_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 76b5f94..af28734 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
 
-	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
 		return -EINVAL;
 
-	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
 		return -EINVAL;
 
-	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+	if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
 		return -EINVAL;
 
 	GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 
 	WARN_ON(ring_mask == 0);
 	WARN_ON(ring_mask &
-		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+
+	if (i915_inject_load_failure())
+		return -ENODEV;
 
 	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
 		if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 		err = -EINVAL;
 		err_id = id;
 
-		if (GEM_WARN_ON(!init))
+		if (GEM_DEBUG_WARN_ON(!init))
 			goto cleanup;
 
 		err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 
 	execlists->port_mask = 1;
-	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
 	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
 	execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
 	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
-	lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
+	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
 
 	intel_engine_init_execlist(engine);
 	intel_engine_init_hangcheck(engine);
@@ -679,7 +682,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 	i915_timeline_fini(&engine->timeline);
 
+	intel_wa_list_free(&engine->ctx_wa_list);
 	intel_wa_list_free(&engine->wa_list);
+	intel_wa_list_free(&engine->whitelist);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -769,7 +774,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 	u32 slice = fls(sseu->slice_mask);
 	u32 subslice = fls(sseu->subslice_mask[slice]);
 
-	if (INTEL_GEN(dev_priv) == 10)
+	if (IS_GEN10(dev_priv))
 		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
 				  GEN8_MCR_SUBSLICE(subslice);
 	else if (INTEL_GEN(dev_priv) >= 11)
@@ -1494,10 +1499,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p =
-			rb_entry(rb, typeof(*p), node);
+		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+		int i;
 
-		list_for_each_entry(rq, &p->requests, sched.link) {
+		priolist_for_each_request(rq, p, i) {
 			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
 				print_request(m, rq, "\t\tQ ");
 			else
@@ -1519,8 +1524,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
-		drm_printf(m, "\t%s [%d] waiting for %x\n",
-			   w->tsk->comm, w->tsk->pid, w->seqno);
+		drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
+			   w->tsk->comm, w->tsk->pid,
+			   task_state_to_char(w->tsk),
+			   w->seqno);
 	}
 	spin_unlock(&b->rb_lock);
 	local_irq_restore(flags);
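
Earlier in this file's diff, an open-coded sizeof(mask) * BITS_PER_BYTE
in the GENMASK() bound is replaced by BITS_PER_TYPE(mask). A tiny
sketch of the equivalence, with the macro re-declared as in the
kernel's bitops headers:

#include <stdio.h>

#define BITS_PER_BYTE		8
/* as defined in the kernel's bitops headers */
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)

int main(void)
{
	unsigned int mask = 0;

	/* both spellings yield the number of bits in 'mask' */
	printf("%zu == %zu\n",
	       sizeof(mask) * BITS_PER_BYTE,
	       BITS_PER_TYPE(mask));
	return 0;
}
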
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c..f23570c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
 	int lines;
 
 	intel_fbc_get_plane_source_size(cache, NULL, &lines);
-	if (INTEL_GEN(dev_priv) == 7)
+	if (IS_GEN7(dev_priv))
 		lines = min(lines, 2048);
 	else if (INTEL_GEN(dev_priv) >= 8)
 		lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->plane.adjusted_y = plane_state->color_plane[0].y;
 	cache->plane.y = plane_state->base.src.y1 >> 16;
 
+	cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
 	if (!cache->plane.visible)
 		return;
 
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
+	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+	    cache->fb.format->has_alpha) {
+		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+		return false;
+	}
+
 	/* WaFbcExceedCdClockThreshold:hsw,bdw */
 	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
 	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
@@ -1301,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	fbc->active = false;
 
 	if (need_fbc_vtd_wa(dev_priv))
-		mkwrite_device_info(dev_priv)->has_fbc = false;
+		mkwrite_device_info(dev_priv)->display.has_fbc = false;
 
 	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
 	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f993329..fb5bb5b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 		 * pipe.  Note we need to use the selected fb's pitch and bpp
 		 * rather than the current pipe's, since they differ.
 		 */
-		cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+		cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
 		cur_size = cur_size * fb->base.format->cpp[0];
 		if (fb->base.pitches[0] < cur_size) {
 			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 			break;
 		}
 
-		cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+		cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
 		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
 		cur_size *= fb->base.pitches[0];
 		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
 			      pipe_name(intel_crtc->pipe),
-			      intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
-			      intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+			      crtc->state->adjusted_mode.crtc_hdisplay,
+			      crtc->state->adjusted_mode.crtc_vdisplay,
 			      fb->base.format->cpp[0] * 8,
 			      cur_size);
 
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	struct intel_fbdev *ifbdev;
 	int ret;
 
-	if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+	if (WARN_ON(!HAS_DISPLAY(dev_priv)))
 		return -ENODEV;
 
 	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea6..8660af3 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	unsigned int i;
 
 	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
-	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+	guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+	BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
 
 	for (i = 0; i < guc->send_regs.count; i++) {
 		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+/*
+ * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
+ * then return, so waiting on the H2G is not enough to guarantee GuC is done.
+ * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
+ * scratch register 14, so we can poll on that. Note that GuC does not ensure
+ * that the value in the register is different from
+ * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
+ * take care of that ourselves as well.
+ */
+static int guc_sleep_state_action(struct intel_guc *guc,
+				  const u32 *action, u32 len)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	int ret;
+	u32 status;
+
+	I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+
+	ret = intel_guc_send(guc, action, len);
+	if (ret)
+		return ret;
+
+	ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
+					0, 0, 10, &status);
+	if (ret)
+		return ret;
+
+	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
+		DRM_ERROR("GuC failed to change sleep state. "
+			  "action=0x%x, err=%u\n",
+			  action[0], status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 /**
  * intel_guc_suspend() - notify GuC entering suspend state
  * @guc:	the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
 		intel_guc_ggtt_offset(guc, guc->shared_data)
 	};
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
 }
 
 /**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
 		intel_guc_ggtt_offset(guc, guc->shared_data)
 	};
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
 }
 
 /**
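
The poison-then-poll handshake that guc_sleep_state_action() implements
above can be illustrated with a toy model. The "register" is a plain
variable and the firmware side is simulated, so this only demonstrates
the ordering, not real MMIO:

#include <stdio.h>

#define SLEEP_STATE_INVALID_MASK	0x80000000u
#define SLEEP_STATE_SUCCESS		0x0u

static unsigned int scratch14;	/* stand-in for SOFT_SCRATCH(14) */

static void firmware_completes(void)
{
	/* the GuC eventually replaces the poison with a real status */
	scratch14 = SLEEP_STATE_SUCCESS;
}

int main(void)
{
	/* 1. poison the register so a stale SUCCESS cannot fool us */
	scratch14 = SLEEP_STATE_INVALID_MASK;

	/* 2. send the H2G action (simulated here) */
	firmware_completes();

	/* 3. poll the poison away, then check the final status */
	while (scratch14 & SLEEP_STATE_INVALID_MASK)
		;

	puts(scratch14 == SLEEP_STATE_SUCCESS ? "ok" : "failed");
	return 0;
}
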
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf..0f1c4f9 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
 	void (*notify)(struct intel_guc *guc);
 };
 
+static inline bool intel_guc_is_alive(struct intel_guc *guc)
+{
+	return intel_uc_fw_is_loaded(&guc->fw);
+}
+
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcc..a67144e 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
 		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
 		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
 	} else {
-		DRM_WARN("%s: No firmware known for this platform!\n",
+		dev_info(dev_priv->drm.dev,
+			 "%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(guc_fw->type));
 	}
 }
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
 }
 
 /* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_uc_fw *guc_fw = &guc->fw;
-	struct sg_table *sg = vma->pages;
 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
 	int i;
 
-	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
-			       guc_fw->rsa_offset) != sizeof(rsa))
-		return -EINVAL;
+	sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+			   rsa, sizeof(rsa), guc->fw.rsa_offset);
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
-	return 0;
 }
 
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_uc_fw *guc_fw = &guc->fw;
-	unsigned long offset;
-	u32 status;
-	int ret;
 
-	/*
-	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
-	 * other components
-	 */
-	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
-	/* Set the source address for the new blob */
-	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
-	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
-	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
-	/*
-	 * Set the DMA destination. Current uCode expects the code to be
-	 * loaded at 8k; locations below this are used for the stack.
-	 */
-	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
-	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
-	/* Finally start the DMA */
-	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
-	/* Wait for DMA to finish */
-	ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
-					   2, 100, &status);
-	DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
-	return ret;
+	/* Did we complete the xfer? */
+	*status = I915_READ(DMA_CTRL);
+	return !(*status & START_DMA);
 }
 
 /*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
 	 * NB: Docs recommend not using the interrupt for completion.
 	 * Measurements indicate this should take no more than 20ms, so a
 	 * timeout here indicates that the GuC has failed and is unusable.
-	 * (Higher levels of the driver will attempt to fall back to
-	 * execlist mode if this happens.)
+	 * (Higher levels of the driver may decide to reset the GuC and
+	 * attempt the ucode load again if this happens.)
 	 */
 	ret = wait_for(guc_ready(guc, &status), 100);
 	DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
 		ret = -ENOEXEC;
 	}
 
+	if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+		DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+			  status);
+		ret = -ENXIO;
+	}
+
 	return ret;
 }
 
 /*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uc_fw *guc_fw = &guc->fw;
+	unsigned long offset;
+
+	/*
+	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
+	 * other components
+	 */
+	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+	/* Set the source address for the new blob */
+	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+	/*
+	 * Set the DMA destination. Current uCode expects the code to be
+	 * loaded at 8k; locations below this are used for the stack.
+	 */
+	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+	/* Finally start the DMA */
+	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+	return guc_wait_ucode(guc);
+}
+
+/*
  * Load the GuC firmware blob into the MinuteIA.
  */
 static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 	 * by the DMA engine in one operation, whereas the RSA signature is
 	 * loaded via MMIO.
 	 */
-	ret = guc_xfer_rsa(guc, vma);
-	if (ret)
-		DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+	guc_xfer_rsa(guc, vma);
 
 	ret = guc_xfer_ucode(guc, vma);
-	if (ret)
-		DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
-	ret = guc_wait_ucode(guc);
-	if (ret)
-		DRM_ERROR("GuC firmware xfer error %d\n", ret);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d59..b2f5148 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
 #define GUC_VIDEO_ENGINE2		4
 #define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)
 
+#define GUC_DOORBELL_INVALID		256
+
+#define GUC_DB_SIZE			(PAGE_SIZE)
+#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
+
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE		1
 #define WQ_STATUS_SUSPENDED		2
@@ -59,9 +64,6 @@
 #define WQ_RING_TAIL_MAX		0x7FF	/* 2^11 QWords */
 #define WQ_RING_TAIL_MASK		(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
 
-#define GUC_DOORBELL_ENABLED		1
-#define GUC_DOORBELL_DISABLED		0
-
 #define GUC_STAGE_DESC_ATTR_ACTIVE	BIT(0)
 #define GUC_STAGE_DESC_ATTR_PENDING_DB	BIT(1)
 #define GUC_STAGE_DESC_ATTR_KERNEL	BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
 	u32 header_info;
 } __packed;
 
-struct guc_doorbell_info {
-	u32 db_status;
-	u32 cookie;
-	u32 reserved[14];
-} __packed;
-
-union guc_doorbell_qw {
-	struct {
-		u32 db_status;
-		u32 cookie;
-	};
-	u64 value_qw;
-} __packed;
-
-#define GUC_NUM_DOORBELLS	256
-#define GUC_DOORBELL_INVALID	(GUC_NUM_DOORBELLS)
-
-#define GUC_DB_SIZE			(PAGE_SIZE)
-#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
-
 /* Work item for submitting workloads into work queue of GuC. */
 struct guc_wq_item {
 	u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
  * registers, where first register holds data treated as message header,
  * and other registers are used to hold message payload.
  *
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
  *
  *      +-----------+---------+---------+---------+
  *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
  *   field.
  */
 
+#define GUC_MAX_MMIO_MSG_LEN		8
+
 #define INTEL_GUC_MSG_TYPE_SHIFT	28
 #define INTEL_GUC_MSG_TYPE_MASK		(0xF << INTEL_GUC_MSG_TYPE_SHIFT)
 #define INTEL_GUC_MSG_DATA_SHIFT	16
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
 	INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
 };
 
+enum intel_guc_sleep_state_status {
+	INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
+	INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
+	INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
 #define GUC_LOG_CONTROL_LOGGING_ENABLED	(1 << 0)
 #define GUC_LOG_CONTROL_VERBOSITY_SHIFT	4
 #define GUC_LOG_CONTROL_VERBOSITY_MASK	(0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
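
Given the MMIO transport documented above (action header in MMIO[0],
payload in the following registers, at most GUC_MAX_MMIO_MSG_LEN
registers in total), a hypothetical H2G message might be assembled
like this; the opcode and parameters are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define GUC_MAX_MMIO_MSG_LEN	8

int main(void)
{
	/* hypothetical action: opcode in dword 0, parameters after */
	uint32_t action[] = {
		0x30,		/* invented opcode, not a real H2G code */
		0,		/* first parameter */
		0x1000,		/* second parameter, e.g. a GGTT offset */
	};
	size_t len = sizeof(action) / sizeof(action[0]);

	if (len > GUC_MAX_MMIO_MSG_LEN) {
		fprintf(stderr, "message too long for MMIO transport\n");
		return 1;
	}

	printf("sending %zu-dword H2G message, header %#x\n",
	       len, (unsigned int)action[0]);
	return 0;
}
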
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d860847..57e7ad5 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
 #define GUC_SEND_INTERRUPT		_MMIO(0xc4c8)
 #define   GUC_SEND_TRIGGER		  (1<<0)
 
+#define GUC_NUM_DOORBELLS		256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+	u32 db_status;
+#define GUC_DOORBELL_DISABLED		0
+#define GUC_DOORBELL_ENABLED		1
+
+	u32 cookie;
+	u32 reserved[14];
+} __packed;
+
 #define GEN8_DRBREGL(x)			_MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID		  (1<<0)
 #define GEN8_DRBREGU(x)			_MMIO(0x1000 + (x) * 8 + 4)
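
A quick size check of the doorbell layout moved above: db_status,
cookie and 14 reserved words make 16 u32s, i.e. exactly the 64-byte
cacheline the GuC monitors.

#include <stdint.h>
#include <stdio.h>

struct guc_doorbell_info {
	uint32_t db_status;
	uint32_t cookie;
	uint32_t reserved[14];
} __attribute__((packed));

int main(void)
{
	/* 16 u32s == 64 bytes, one cacheline */
	printf("%zu\n", sizeof(struct guc_doorbell_info));
	return 0;
}
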
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d..1570dcb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
 	return client->vaddr + client->doorbell_offset;
 }
 
-static void __create_doorbell(struct intel_guc_client *client)
+static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
+	return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+}
+
+static void __init_doorbell(struct intel_guc_client *client)
 {
 	struct guc_doorbell_info *doorbell;
 
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
 	doorbell->cookie = 0;
 }
 
-static void __destroy_doorbell(struct intel_guc_client *client)
+static void __fini_doorbell(struct intel_guc_client *client)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
 	struct guc_doorbell_info *doorbell;
 	u16 db_id = client->doorbell_id;
 
 	doorbell = __get_doorbell(client);
 	doorbell->db_status = GUC_DOORBELL_DISABLED;
-	doorbell->cookie = 0;
 
 	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
 	 * to go to zero after updating db_status before we call the GuC to
 	 * release the doorbell
 	 */
-	if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+	if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
 		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
 }
 
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
 		return -ENODEV; /* internal setup error, should never happen */
 
 	__update_doorbell_desc(client, client->doorbell_id);
-	__create_doorbell(client);
+	__init_doorbell(client);
 
 	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
 	if (ret) {
-		__destroy_doorbell(client);
+		__fini_doorbell(client);
 		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
 		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
 				 client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
 
 	GEM_BUG_ON(!has_doorbell(client));
 
-	__destroy_doorbell(client);
+	__fini_doorbell(client);
 	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
 	if (ret)
 		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_proc_desc_init(struct intel_guc *guc,
-			       struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc_client *client)
 {
 	struct guc_process_desc *desc;
 
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 	desc->priority = client->priority;
 }
 
+static void guc_proc_desc_fini(struct intel_guc_client *client)
+{
+	struct guc_process_desc *desc;
+
+	desc = __get_process_desc(client);
+	memset(desc, 0, sizeof(*desc));
+}
+
 static int guc_stage_desc_pool_create(struct intel_guc *guc)
 {
 	struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
  * data structures relating to this client (doorbell, process descriptor,
  * write queue, etc).
  */
-static void guc_stage_desc_init(struct intel_guc *guc,
-				struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc_client *client)
 {
+	struct intel_guc *guc = client->guc;
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	desc->desc_private = ptr_to_u64(client);
 }
 
-static void guc_stage_desc_fini(struct intel_guc *guc,
-				struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc_client *client)
 {
 	struct guc_stage_desc *desc;
 
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
 	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
 }
 
-static void guc_reset_wq(struct intel_guc_client *client)
-{
-	struct guc_process_desc *desc = __get_process_desc(client);
-
-	desc->head = 0;
-	desc->tail = 0;
-}
-
 static void guc_ring_doorbell(struct intel_guc_client *client)
 {
 	struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			if (last && rq->hw_context != last->hw_context) {
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				if (submit)
 					port_assign(port, last);
 				port++;
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
 
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -791,19 +793,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 
 static void guc_dequeue(struct intel_engine_cs *engine)
 {
-	unsigned long flags;
-	bool submit;
-
-	local_irq_save(flags);
-
-	spin_lock(&engine->timeline.lock);
-	submit = __guc_dequeue(engine);
-	spin_unlock(&engine->timeline.lock);
-
-	if (submit)
+	if (__guc_dequeue(engine))
 		guc_submit(engine);
-
-	local_irq_restore(flags);
 }
 
 static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
 	struct i915_request *rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	rq = port_request(port);
 	while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
 
 	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
 		guc_dequeue(engine);
+
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
 /* Check that a doorbell register is in the expected state */
 static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	u32 drbregl;
 	bool valid;
 
-	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
 
-	drbregl = I915_READ(GEN8_DRBREGL(db_id));
-	valid = drbregl & GEN8_DRB_VALID;
+	valid = __doorbell_valid(guc, db_id);
 
 	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
 		return true;
 
-	DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
-			 db_id, drbregl, yesno(valid));
+	DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
+			 db_id, yesno(valid));
 
 	return false;
 }
 
 static bool guc_verify_doorbells(struct intel_guc *guc)
 {
+	bool doorbells_ok = true;
 	u16 db_id;
 
 	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
 		if (!doorbell_ok(guc, db_id))
-			return false;
+			doorbells_ok = false;
 
-	return true;
-}
-
-static int guc_clients_doorbell_init(struct intel_guc *guc)
-{
-	int ret;
-
-	ret = create_doorbell(guc->execbuf_client);
-	if (ret)
-		return ret;
-
-	if (guc->preempt_client) {
-		ret = create_doorbell(guc->preempt_client);
-		if (ret) {
-			destroy_doorbell(guc->execbuf_client);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static void guc_clients_doorbell_fini(struct intel_guc *guc)
-{
-	/*
-	 * By the time we're here, GuC has already been reset.
-	 * Instead of trying (in vain) to communicate with it, let's just
-	 * cleanup the doorbell HW and our internal state.
-	 */
-	if (guc->preempt_client) {
-		__destroy_doorbell(guc->preempt_client);
-		__update_doorbell_desc(guc->preempt_client,
-				       GUC_DOORBELL_INVALID);
-	}
-
-	if (guc->execbuf_client) {
-		__destroy_doorbell(guc->execbuf_client);
-		__update_doorbell_desc(guc->execbuf_client,
-				       GUC_DOORBELL_INVALID);
-	}
+	return doorbells_ok;
 }
 
 /**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	}
 	client->vaddr = vaddr;
 
+	ret = reserve_doorbell(client);
+	if (ret)
+		goto err_vaddr;
+
 	client->doorbell_offset = __select_cacheline(guc);
 
 	/*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	else
 		client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-	guc_proc_desc_init(guc, client);
-	guc_stage_desc_init(guc, client);
-
-	ret = reserve_doorbell(client);
-	if (ret)
-		goto err_vaddr;
-
 	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
 			 priority, client, client->engines, client->stage_id);
 	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 static void guc_client_free(struct intel_guc_client *client)
 {
 	unreserve_doorbell(client);
-	guc_stage_desc_fini(client->guc, client);
 	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
 	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
 	kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
 		guc_client_free(client);
 }
 
+static int __guc_client_enable(struct intel_guc_client *client)
+{
+	int ret;
+
+	guc_proc_desc_init(client);
+	guc_stage_desc_init(client);
+
+	ret = create_doorbell(client);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	guc_stage_desc_fini(client);
+	guc_proc_desc_fini(client);
+	return ret;
+}
+
+static void __guc_client_disable(struct intel_guc_client *client)
+{
+	/*
+	 * By the time we're here, GuC may have already been reset. If that is
+	 * the case, instead of trying (in vain) to communicate with it, let's
+	 * just clean up the doorbell HW and our internal state.
+	 */
+	if (intel_guc_is_alive(client->guc))
+		destroy_doorbell(client);
+	else
+		__fini_doorbell(client);
+
+	guc_stage_desc_fini(client);
+	guc_proc_desc_fini(client);
+}
+
+static int guc_clients_enable(struct intel_guc *guc)
+{
+	int ret;
+
+	ret = __guc_client_enable(guc->execbuf_client);
+	if (ret)
+		return ret;
+
+	if (guc->preempt_client) {
+		ret = __guc_client_enable(guc->preempt_client);
+		if (ret) {
+			__guc_client_disable(guc->execbuf_client);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void guc_clients_disable(struct intel_guc *guc)
+{
+	if (guc->preempt_client)
+		__guc_client_disable(guc->preempt_client);
+
+	if (guc->execbuf_client)
+		__guc_client_disable(guc->execbuf_client);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 
 	GEM_BUG_ON(!guc->execbuf_client);
 
-	guc_reset_wq(guc->execbuf_client);
-	if (guc->preempt_client)
-		guc_reset_wq(guc->preempt_client);
-
 	err = intel_guc_sample_forcewake(guc);
 	if (err)
 		return err;
 
-	err = guc_clients_doorbell_init(guc);
+	err = guc_clients_enable(guc);
 	if (err)
 		return err;
 
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 	GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
 
 	guc_interrupts_release(dev_priv);
-	guc_clients_doorbell_fini(guc);
+	guc_clients_disable(guc);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
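
A note on two of the changes above: guc_dequeue() could drop its own irq save/restore because guc_submission_tasklet() now holds engine->timeline.lock across the whole completion-and-dequeue pass, and guc_verify_doorbells() now keeps scanning after the first mismatch so every inconsistent doorbell gets reported. A standalone sketch of that scan-everything pattern, with a made-up count and helpers standing in for GUC_NUM_DOORBELLS and the driver's doorbell_ok():

#include <stdbool.h>
#include <stdio.h>

#define NUM_DOORBELLS 256	/* stand-in for GUC_NUM_DOORBELLS */

/* stand-in for doorbell_ok(): SW tracking must agree with HW state */
static bool doorbell_ok(const bool *hw_valid, const bool *sw_bitmap, int id)
{
	if (hw_valid[id] == sw_bitmap[id])
		return true;

	fprintf(stderr, "doorbell %d has unexpected state\n", id);
	return false;
}

static bool verify_doorbells(const bool *hw_valid, const bool *sw_bitmap)
{
	bool ok = true;
	int id;

	/* keep going on failure so every bad doorbell is reported */
	for (id = 0; id < NUM_DOORBELLS; id++)
		if (!doorbell_ok(hw_valid, sw_bitmap, id))
			ok = false;

	return ok;
}

int main(void)
{
	bool hw[NUM_DOORBELLS] = { false };
	bool sw[NUM_DOORBELLS] = { false };

	sw[3] = sw[7] = true;	/* two doorbells the HW never acknowledged */
	printf("all ok: %d\n", verify_doorbells(hw, sw));
	return 0;
}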
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc..1bf487f 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
 
 #define KEY_LOAD_TRIES	5
 
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+	int i, ones = 0;
+	/* KSV has 20 1's and 20 0's */
+	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+		ones += hweight8(ksv[i]);
+	if (ones != 20)
+		return false;
+
+	return true;
+}
+
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+			       const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+	int ret, i, tries = 2;
+
+	/* HDCP spec states that we must retry the bksv if it is invalid */
+	for (i = 0; i < tries; i++) {
+		ret = shim->read_bksv(intel_dig_port, bksv);
+		if (ret)
+			return ret;
+		if (intel_hdcp_is_ksv_valid(bksv))
+			break;
+	}
+	if (i == tries) {
+		DRM_DEBUG_KMS("Bksv is invalid\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* Is HDCP1.4 capable on Platform and Sink */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+	bool capable = false;
+	u8 bksv[5];
+
+	if (!shim)
+		return capable;
+
+	if (shim->hdcp_capable) {
+		shim->hdcp_capable(intel_dig_port, &capable);
+	} else {
+		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+			capable = true;
+	}
+
+	return capable;
+}
+
 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
 				    const struct intel_hdcp_shim *shim)
 {
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
 }
 
 static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
-	int i, ones = 0;
-	/* KSV has 20 1's and 20 0's */
-	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
-		ones += hweight8(ksv[i]);
-	if (ones != 20)
-		return false;
-	return true;
-}
-
-static
 int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
 				const struct intel_hdcp_shim *shim,
 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
 	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
 				    HDCP_SHA1_COMPLETE,
 				    HDCP_SHA1_COMPLETE, 1)) {
-		DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
+		DRM_ERROR("Timed out waiting for SHA1 complete\n");
 		return -ETIMEDOUT;
 	}
 	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
 	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
 	if (ret) {
-		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
 		return ret;
 	}
 
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
 	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
 	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
-		DRM_ERROR("Max Topology Limit Exceeded\n");
+		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
 		return -EPERM;
 	}
 
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 	}
 
 	if (i == tries) {
-		DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
 		goto err;
 	}
 
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 		if (ret)
 			return ret;
 		if (!hdcp_capable) {
-			DRM_ERROR("Panel is not HDCP capable\n");
+			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
 			return -EINVAL;
 		}
 	}
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 
 	memset(&bksv, 0, sizeof(bksv));
 
-	/* HDCP spec states that we must retry the bksv if it is invalid */
-	for (i = 0; i < tries; i++) {
-		ret = shim->read_bksv(intel_dig_port, bksv.shim);
-		if (ret)
-			return ret;
-		if (intel_hdcp_is_ksv_valid(bksv.shim))
-			break;
-	}
-	if (i == tries) {
-		DRM_ERROR("HDCP failed, Bksv is invalid\n");
-		return -ENODEV;
-	}
+	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+	if (ret < 0)
+		return ret;
 
 	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
 	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 	}
 
 	if (i == tries) {
-		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
-			  I915_READ(PORT_HDCP_STATUS(port)));
+		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+			      I915_READ(PORT_HDCP_STATUS(port)));
 		return -ETIMEDOUT;
 	}
 
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 	return 0;
 }
 
-static
-struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
-{
-	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
-}
-
 static int _intel_hdcp_disable(struct intel_connector *connector)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
 	enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 		return -ETIMEDOUT;
 	}
 
-	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
 	if (ret) {
 		DRM_ERROR("Failed to disable HDCP signalling\n");
 		return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 
 static int _intel_hdcp_enable(struct intel_connector *connector)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
 	int i, ret, tries = 3;
 
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 
 	/* In case of authentication failures, HDCP spec expects reauth. */
 	for (i = 0; i < tries; i++) {
-		ret = intel_hdcp_auth(conn_to_dig_port(connector),
-				      connector->hdcp_shim);
+		ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
 		if (!ret)
 			return 0;
 
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 		_intel_hdcp_disable(connector);
 	}
 
-	DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
 	return ret;
 }
 
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+	return container_of(hdcp, struct intel_connector, hdcp);
+}
+
 static void intel_hdcp_check_work(struct work_struct *work)
 {
-	struct intel_connector *connector = container_of(to_delayed_work(work),
-							 struct intel_connector,
-							 hdcp_check_work);
+	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+					       struct intel_hdcp,
+					       check_work);
+	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
 	if (!intel_hdcp_check_link(connector))
-		schedule_delayed_work(&connector->hdcp_check_work,
+		schedule_delayed_work(&hdcp->check_work,
 				      DRM_HDCP_CHECK_PERIOD_MS);
 }
 
 static void intel_hdcp_prop_work(struct work_struct *work)
 {
-	struct intel_connector *connector = container_of(work,
-							 struct intel_connector,
-							 hdcp_prop_work);
+	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+					       prop_work);
+	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
 	struct drm_device *dev = connector->base.dev;
 	struct drm_connector_state *state;
 
 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	mutex_lock(&connector->hdcp_mutex);
+	mutex_lock(&hdcp->mutex);
 
 	/*
 	 * This worker is only used to flip between ENABLED/DESIRED. Either of
-	 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
 	 * we're running just after hdcp has been disabled, so just exit
 	 */
-	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 		state = connector->base.state;
-		state->content_protection = connector->hdcp_value;
+		state->content_protection = hdcp->value;
 	}
 
-	mutex_unlock(&connector->hdcp_mutex);
+	mutex_unlock(&hdcp->mutex);
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 }
 
 int intel_hdcp_init(struct intel_connector *connector,
-		    const struct intel_hdcp_shim *hdcp_shim)
+		    const struct intel_hdcp_shim *shim)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
 	ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
 	if (ret)
 		return ret;
 
-	connector->hdcp_shim = hdcp_shim;
-	mutex_init(&connector->hdcp_mutex);
-	INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
-	INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+	hdcp->shim = shim;
+	mutex_init(&hdcp->mutex);
+	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
 	return 0;
 }
 
 int intel_hdcp_enable(struct intel_connector *connector)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
-	if (!connector->hdcp_shim)
+	if (!hdcp->shim)
 		return -ENOENT;
 
-	mutex_lock(&connector->hdcp_mutex);
+	mutex_lock(&hdcp->mutex);
 
 	ret = _intel_hdcp_enable(connector);
 	if (ret)
 		goto out;
 
-	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-	schedule_work(&connector->hdcp_prop_work);
-	schedule_delayed_work(&connector->hdcp_check_work,
+	hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+	schedule_work(&hdcp->prop_work);
+	schedule_delayed_work(&hdcp->check_work,
 			      DRM_HDCP_CHECK_PERIOD_MS);
 out:
-	mutex_unlock(&connector->hdcp_mutex);
+	mutex_unlock(&hdcp->mutex);
 	return ret;
 }
 
 int intel_hdcp_disable(struct intel_connector *connector)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret = 0;
 
-	if (!connector->hdcp_shim)
+	if (!hdcp->shim)
 		return -ENOENT;
 
-	mutex_lock(&connector->hdcp_mutex);
+	mutex_lock(&hdcp->mutex);
 
-	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
 		ret = _intel_hdcp_disable(connector);
 	}
 
-	mutex_unlock(&connector->hdcp_mutex);
-	cancel_delayed_work_sync(&connector->hdcp_check_work);
+	mutex_unlock(&hdcp->mutex);
+	cancel_delayed_work_sync(&hdcp->check_work);
 	return ret;
 }
 
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 /* Implements Part 3 of the HDCP authorization procedure */
 int intel_hdcp_check_link(struct intel_connector *connector)
 {
+	struct intel_hdcp *hdcp = &connector->hdcp;
 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
 	enum port port = intel_dig_port->base.port;
 	int ret = 0;
 
-	if (!connector->hdcp_shim)
+	if (!hdcp->shim)
 		return -ENOENT;
 
-	mutex_lock(&connector->hdcp_mutex);
+	mutex_lock(&hdcp->mutex);
 
-	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
 		goto out;
 
 	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
 			  connector->base.name, connector->base.base.id,
 			  I915_READ(PORT_HDCP_STATUS(port)));
 		ret = -ENXIO;
-		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&connector->hdcp_prop_work);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
 		goto out;
 	}
 
-	if (connector->hdcp_shim->check_link(intel_dig_port)) {
-		if (connector->hdcp_value !=
-		    DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-			connector->hdcp_value =
-				DRM_MODE_CONTENT_PROTECTION_ENABLED;
-			schedule_work(&connector->hdcp_prop_work);
+	if (hdcp->shim->check_link(intel_dig_port)) {
+		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+			schedule_work(&hdcp->prop_work);
 		}
 		goto out;
 	}
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
 	ret = _intel_hdcp_disable(connector);
 	if (ret) {
 		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
-		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&connector->hdcp_prop_work);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
 		goto out;
 	}
 
 	ret = _intel_hdcp_enable(connector);
 	if (ret) {
-		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
-		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&connector->hdcp_prop_work);
+		DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
 		goto out;
 	}
 
 out:
-	mutex_unlock(&connector->hdcp_mutex);
+	mutex_unlock(&hdcp->mutex);
 	return ret;
 }
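
The bksv retry path and the KSV check hoisted to the top of this file rest on a single HDCP 1.4 rule: a valid 40-bit KSV contains exactly twenty 1 bits. A standalone sketch of the test, with __builtin_popcount() standing in for the kernel's hweight8() and a made-up KSV_LEN for DRM_HDCP_KSV_LEN:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KSV_LEN 5	/* stand-in for DRM_HDCP_KSV_LEN (40 bits) */

static bool ksv_valid(const uint8_t *ksv)
{
	int i, ones = 0;

	/* HDCP 1.4: exactly 20 ones and 20 zeroes across the 5 bytes */
	for (i = 0; i < KSV_LEN; i++)
		ones += __builtin_popcount(ksv[i]);

	return ones == 20;
}

int main(void)
{
	const uint8_t good[KSV_LEN] = { 0x0f, 0x0f, 0x0f, 0x0f, 0x0f }; /* 20 ones */
	const uint8_t bad[KSV_LEN]  = { 0xff, 0xff, 0xff, 0xff, 0xff }; /* 40 ones */

	printf("good: %d, bad: %d\n", ksv_valid(good), ksv_valid(bad));
	return 0;
}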
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a2dab0b..07e803a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
 	switch (type) {
 	case DP_SDP_VSC:
 		return VIDEO_DIP_ENABLE_VSC_HSW;
+	case DP_SDP_PPS:
+		return VDIP_ENABLE_PPS;
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI_HSW;
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 	switch (type) {
 	case DP_SDP_VSC:
 		return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+	case DP_SDP_PPS:
+		return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,14 +152,25 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void g4x_write_infoframe(struct drm_encoder *encoder,
+static int hsw_dip_data_size(unsigned int type)
+{
+	switch (type) {
+	case DP_SDP_VSC:
+		return VIDEO_DIP_VSC_DATA_SIZE;
+	case DP_SDP_PPS:
+		return VIDEO_DIP_PPS_DATA_SIZE;
+	default:
+		return VIDEO_DIP_DATA_SIZE;
+	}
+}
+
+static void g4x_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 	int i;
 
@@ -186,31 +201,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 	POSTING_READ(VIDEO_DIP_CTL);
 }
 
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return false;
 
-	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
 		return false;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
 		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
-static void ibx_write_infoframe(struct drm_encoder *encoder,
+static void ibx_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -243,11 +256,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
 	i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
 	u32 val = I915_READ(reg);
@@ -255,7 +267,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return false;
 
-	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
 		return false;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +275,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
 		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void cpt_write_infoframe(struct drm_encoder *encoder,
+static void cpt_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -306,10 +317,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
 	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
@@ -321,14 +332,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
 		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void vlv_write_infoframe(struct drm_encoder *encoder,
+static void vlv_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -361,18 +371,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 	POSTING_READ(reg);
 }
 
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
 	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return false;
 
-	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
 		return false;
 
 	return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,21 +389,21 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
 		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
-static void hsw_write_infoframe(struct drm_encoder *encoder,
+static void hsw_write_infoframe(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const u32 *data = frame;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
-	int data_size = type == DP_SDP_VSC ?
-		VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+	int data_size;
 	int i;
 	u32 val = I915_READ(ctl_reg);
 
+	data_size = hsw_dip_data_size(type);
+
 	val &= ~hsw_infoframe_enable(type);
 	I915_WRITE(ctl_reg, val);
 
@@ -415,10 +424,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 	POSTING_READ(ctl_reg);
 }
 
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 
 	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +452,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
  * trick them by giving an offset into the buffer and moving back the header
  * bytes by one.
  */
-static void intel_write_infoframe(struct drm_encoder *encoder,
+static void intel_write_infoframe(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	ssize_t len;
 
@@ -457,24 +466,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
 		return;
 
 	/* Insert the 'hole' (see big comment above) at position 3 */
-	buffer[0] = buffer[1];
-	buffer[1] = buffer[2];
-	buffer[2] = buffer[3];
+	memmove(&buffer[0], &buffer[1], 3);
 	buffer[3] = 0;
 	len++;
 
-	intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+	intel_dig_port->write_infoframe(encoder,
+					crtc_state,
+					frame->any.type, buffer, len);
 }
 
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
 					 const struct intel_crtc_state *crtc_state,
 					 const struct drm_connector_state *conn_state)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
 	struct drm_connector *connector = &intel_hdmi->attached_connector->base;
-	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
+	   connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
 	union hdmi_infoframe frame;
 	int ret;
 
@@ -486,8 +496,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 		return;
 	}
 
-	if (crtc_state->ycbcr420)
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 		frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+	else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
 	else
 		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
 
@@ -502,10 +514,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 					    conn_state);
 
 	/* TODO: handle pixel repetition for YCBCR420 outputs */
-	intel_write_infoframe(encoder, crtc_state, &frame);
+	intel_write_infoframe(encoder, crtc_state,
+			      &frame);
 }
 
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
 					 const struct intel_crtc_state *crtc_state)
 {
 	union hdmi_infoframe frame;
@@ -519,11 +532,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
 
 	frame.spd.sdi = HDMI_SPD_SDI_PC;
 
-	intel_write_infoframe(encoder, crtc_state, &frame);
+	intel_write_infoframe(encoder, crtc_state,
+			      &frame);
 }
 
 static void
-intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *crtc_state,
 			      const struct drm_connector_state *conn_state)
 {
@@ -536,20 +550,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
 	if (ret < 0)
 		return;
 
-	intel_write_infoframe(encoder, crtc_state, &frame);
+	intel_write_infoframe(encoder, crtc_state,
+			      &frame);
 }
 
-static void g4x_set_infoframes(struct drm_encoder *encoder,
+static void g4x_set_infoframes(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
 	u32 val = I915_READ(reg);
-	u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
@@ -657,11 +672,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
 		 mode->crtc_htotal/2 % pixels_per_group == 0);
 }
 
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
 					 const struct intel_crtc_state *crtc_state,
 					 const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg;
 	u32 val = 0;
@@ -689,18 +704,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
 	return val != 0;
 }
 
-static void ibx_set_infoframes(struct drm_encoder *encoder,
+static void ibx_set_infoframes(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
-	u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
@@ -742,14 +757,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void cpt_set_infoframes(struct drm_encoder *encoder,
+static void cpt_set_infoframes(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
@@ -785,18 +800,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void vlv_set_infoframes(struct drm_encoder *encoder,
+static void vlv_set_infoframes(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
-	u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
@@ -838,12 +852,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
 
-static void hsw_set_infoframes(struct drm_encoder *encoder,
+static void hsw_set_infoframes(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
 	u32 val = I915_READ(reg);
 
@@ -965,13 +979,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
 				    DRM_HDCP_AN_LEN);
 	if (ret) {
-		DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
 		return ret;
 	}
 
 	ret = intel_gmbus_output_aksv(adapter);
 	if (ret < 0) {
-		DRM_ERROR("Failed to output aksv (%d)\n", ret);
+		DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
 		return ret;
 	}
 	return 0;
@@ -984,7 +998,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
 				   DRM_HDCP_KSV_LEN);
 	if (ret)
-		DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
 	return ret;
 }
 
@@ -996,7 +1010,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
 				   bstatus, DRM_HDCP_BSTATUS_LEN);
 	if (ret)
-		DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
 	return ret;
 }
 
@@ -1009,7 +1023,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
 	if (ret) {
-		DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
 		return ret;
 	}
 	*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1024,7 +1038,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
 				   ri_prime, DRM_HDCP_RI_LEN);
 	if (ret)
-		DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
 	return ret;
 }
 
@@ -1037,7 +1051,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
 	if (ret) {
-		DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
 		return ret;
 	}
 	*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1052,7 +1066,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
 				   ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
 	if (ret) {
-		DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
 		return ret;
 	}
 	return 0;
@@ -1070,7 +1084,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 	ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
 				   part, DRM_HDCP_V_PRIME_PART_LEN);
 	if (ret)
-		DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+		DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
 	return ret;
 }
 
@@ -1217,7 +1231,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (tmp & HDMI_MODE_SELECT_HDMI)
 		pipe_config->has_hdmi_sink = true;
 
-	if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+	if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
 		pipe_config->has_infoframe = true;
 
 	if (tmp & SDVO_AUDIO_ENABLE)
@@ -1438,7 +1452,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
 
-	intel_dig_port->set_infoframes(&encoder->base, false,
+	intel_dig_port->set_infoframes(encoder,
+				       false,
 				       old_crtc_state, old_conn_state);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1597,6 +1612,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_connector_state *connector_state;
 	struct drm_connector *connector;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->base.adjusted_mode;
 	int i;
 
 	if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1624,7 +1641,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 		if (connector_state->crtc != crtc_state->base.crtc)
 			continue;
 
-		if (crtc_state->ycbcr420) {
+		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
 			const struct drm_hdmi_info *hdmi = &info->hdmi;
 
 			if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1645,7 +1662,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 
 	/* Display WA #1139: glk */
 	if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
-	    crtc_state->base.adjusted_mode.htotal > 5460)
+	    adjusted_mode->htotal > 5460)
+		return false;
+
+	/* Display Wa_1405510057:icl */
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+	    bpc == 10 && IS_ICELAKE(dev_priv) &&
+	    (adjusted_mode->crtc_hblank_end -
+	     adjusted_mode->crtc_hblank_start) % 8 == 2)
 		return false;
 
 	return true;
@@ -1669,7 +1693,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
 	*clock_12bpc /= 2;
 	*clock_10bpc /= 2;
 	*clock_8bpc /= 2;
-	config->ycbcr420 = true;
+	config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
 
 	/* YCBCR 420 output conversion needs a scaler */
 	if (skl_update_scaler_crtc(config)) {
@@ -1703,6 +1727,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
 	if (pipe_config->has_hdmi_sink)
@@ -1973,7 +1998,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
 
 	intel_hdmi_prepare(encoder, pipe_config);
 
-	intel_dig_port->set_infoframes(&encoder->base,
+	intel_dig_port->set_infoframes(encoder,
 				       pipe_config->has_infoframe,
 				       pipe_config, conn_state);
 }
@@ -1991,7 +2016,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
 	vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
 				 0x2b247878);
 
-	dport->set_infoframes(&encoder->base,
+	dport->set_infoframes(encoder,
 			      pipe_config->has_infoframe,
 			      pipe_config, conn_state);
 
@@ -2062,7 +2087,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
 	/* Use 800mV-0dB */
 	chv_set_phy_signal_level(encoder, 128, 102, false);
 
-	dport->set_infoframes(&encoder->base,
+	dport->set_infoframes(encoder,
 			      pipe_config->has_infoframe,
 			      pipe_config, conn_state);
 
@@ -2074,13 +2099,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
 	chv_phy_release_cl2_override(encoder);
 }
 
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+	int ret;
+
+	ret = intel_connector_register(connector);
+	if (ret)
+		return ret;
+
+	i915_debugfs_connector_add(connector);
+
+	return ret;
+}
+
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
 	if (intel_attached_hdmi(connector)->cec_notifier)
 		cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
-	kfree(to_intel_connector(connector)->detect_edid);
-	drm_connector_cleanup(connector);
-	kfree(connector);
+
+	intel_connector_destroy(connector);
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2089,7 +2127,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
-	.late_register = intel_connector_register,
+	.late_register = intel_hdmi_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_hdmi_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2109,11 +2147,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
 	intel_attach_aspect_ratio_property(connector);
 	drm_connector_attach_content_type_property(connector);
 	connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+
+	if (!HAS_GMCH_DISPLAY(dev_priv))
+		drm_connector_attach_max_bpc_property(connector, 8, 12);
 }
 
 /*
@@ -2324,9 +2367,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
 		intel_dig_port->set_infoframes = g4x_set_infoframes;
 		intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
 	} else if (HAS_DDI(dev_priv)) {
-		intel_dig_port->write_infoframe = hsw_write_infoframe;
-		intel_dig_port->set_infoframes = hsw_set_infoframes;
-		intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+		if (intel_dig_port->lspcon.active) {
+			intel_dig_port->write_infoframe =
+					lspcon_write_infoframe;
+			intel_dig_port->set_infoframes = lspcon_set_infoframes;
+			intel_dig_port->infoframe_enabled =
+						lspcon_infoframe_enabled;
+		} else {
+			intel_dig_port->set_infoframes = hsw_set_infoframes;
+			intel_dig_port->infoframe_enabled =
+						hsw_infoframe_enabled;
+			intel_dig_port->write_infoframe = hsw_write_infoframe;
+		}
 	} else if (HAS_PCH_IBX(dev_priv)) {
 		intel_dig_port->write_infoframe = ibx_write_infoframe;
 		intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2485,5 +2537,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
 
 	intel_infoframe_init(intel_dig_port);
 
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 	intel_hdmi_init_connector(intel_dig_port, intel_connector);
 }
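
The 'hole' insertion rewrite in intel_write_infoframe() above is behaviour-preserving: memmove(&buffer[0], &buffer[1], 3) shifts the three header bytes back by one exactly as the old byte-by-byte assignments did, before byte 3 is zeroed. A quick standalone equivalence check (the buffer contents are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t a[8] = { 0x82, 0x02, 0x0d, 0x10, 0x20, 0x30, 0x40, 0x50 };
	uint8_t b[8];

	memcpy(b, a, sizeof(a));

	/* old open-coded form */
	a[0] = a[1];
	a[1] = a[2];
	a[2] = a[3];
	a[3] = 0;

	/* new form from the patch */
	memmove(&b[0], &b[1], 3);
	b[3] = 0;

	printf("equivalent: %d\n", memcmp(a, b, sizeof(a)) == 0);
	return 0;
}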
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 9a80181..e24174d 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 #define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
 
 /**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
  * @dev_priv: private driver data pointer
  * @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
  *
- * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
  * storms. Only the pin specific stats and state are changed, the caller is
  * responsible for further action.
  *
- * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
  * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
- * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
  *
  * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
  * and should only be adjusted for automated hotplug testing.
  *
- * Return true if an irq storm was detected on @pin.
+ * Return true if an IRQ storm was detected on @pin.
  */
 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
-				       enum hpd_pin pin)
+				       enum hpd_pin pin, bool long_hpd)
 {
-	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+	struct i915_hotplug *hpd = &dev_priv->hotplug;
+	unsigned long start = hpd->stats[pin].last_jiffies;
 	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
-	const int threshold = dev_priv->hotplug.hpd_storm_threshold;
+	const int increment = long_hpd ? 10 : 1;
+	const int threshold = hpd->hpd_storm_threshold;
 	bool storm = false;
 
+	if (!threshold ||
+	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+		return false;
+
 	if (!time_in_range(jiffies, start, end)) {
-		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
-		dev_priv->hotplug.stats[pin].count = 0;
-		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
-	} else if (dev_priv->hotplug.stats[pin].count > threshold &&
-		   threshold) {
-		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+		hpd->stats[pin].last_jiffies = jiffies;
+		hpd->stats[pin].count = 0;
+	}
+
+	hpd->stats[pin].count += increment;
+	if (hpd->stats[pin].count > threshold) {
+		hpd->stats[pin].state = HPD_MARK_DISABLED;
 		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
 		storm = true;
 	} else {
-		dev_priv->hotplug.stats[pin].count++;
 		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
-			      dev_priv->hotplug.stats[pin].count);
+			      hpd->stats[pin].count);
 	}
 
 	return storm;
 }
 
-static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_connector *intel_connector;
@@ -348,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	hpd_event_bits = dev_priv->hotplug.event_bits;
 	dev_priv->hotplug.event_bits = 0;
 
-	/* Disable hotplug on connectors that hit an irq storm. */
-	intel_hpd_irq_storm_disable(dev_priv);
+	/* Enable polling for connectors which had HPD IRQ storms */
+	intel_hpd_irq_storm_switch_to_polling(dev_priv);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -474,15 +491,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			queue_hp = true;
 		}
 
-		if (!long_hpd)
-			continue;
-
-		if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
 			dev_priv->hotplug.event_bits &= ~BIT(pin);
 			storm_detected = true;
+			queue_hp = true;
 		}
 	}
 
+	/*
+	 * Disable any IRQs that storms were detected on. Polling enablement
+	 * happens later in our hotplug work.
+	 */
 	if (storm_detected && dev_priv->display_irqs_enabled)
 		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock(&dev_priv->irq_lock);
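
The reworked detector above weights long HPD IRQs at +10 and short ones at +1 against a single threshold within one detection window, with short-IRQ accounting gated off unless hpd_short_storm_enabled is set. A minimal user-space model of the accounting (the period and threshold values here are illustrative, not the driver's defaults):

#include <stdbool.h>
#include <stdio.h>

struct hpd_stats {
	unsigned long window_start;	/* models stats[pin].last_jiffies */
	int count;
};

/* long IRQs count +10, short ones +1; storm once the threshold is exceeded */
static bool storm_detect(struct hpd_stats *s, unsigned long now,
			 unsigned long period, int threshold, bool long_hpd)
{
	if (now - s->window_start > period) {
		s->window_start = now;	/* new window: restart the count */
		s->count = 0;
	}

	s->count += long_hpd ? 10 : 1;
	return s->count > threshold;
}

int main(void)
{
	struct hpd_stats s = { 0, 0 };
	unsigned long t;
	bool storm = false;

	/* six long IRQs in a tight burst: 6 * 10 = 60 > 50 -> storm */
	for (t = 1; t <= 6 && !storm; t++)
		storm = storm_detect(&s, t, 1000, 50, true);

	printf("storm: %d\n", storm);
	return 0;
}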
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540d..bc27b69 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ int intel_huc_auth(struct intel_huc *huc)
  * This function reads status register to verify if HuC
  * firmware was successfully loaded.
  *
- * Returns positive value if HuC firmware is loaded and verified
- * and -ENODEV if HuC is not present.
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded and -ENODEV if HuC
+ * is not present on this platform.
  */
 int intel_huc_check_status(struct intel_huc *huc)
 {
 	struct drm_i915_private *dev_priv = huc_to_i915(huc);
-	u32 status;
+	bool status;
 
 	if (!HAS_HUC(dev_priv))
 		return -ENODEV;
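
With the kernel-doc fix above, intel_huc_check_status() is explicitly tri-state: -ENODEV when the platform has no HuC, 0 when firmware is not loaded, 1 when it is loaded and verified. A trivial model of that contract for callers (the helper below is made up):

#include <errno.h>
#include <stdio.h>

/* models the documented contract of intel_huc_check_status() */
static int check_status(int has_huc, int fw_verified)
{
	if (!has_huc)
		return -ENODEV;		/* HuC not present on this platform */

	return fw_verified ? 1 : 0;	/* 1 = loaded and verified */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_status(0, 0), check_status(1, 0), check_status(1, 1));
	return 0;
}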
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab..802d039 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
 	unsigned int pin;
 	int ret;
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
 		return 0;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 58d1d3d..4be167d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	ce->lrc_desc = desc;
 }
 
-static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct i915_priolist *p;
-	struct rb_node **parent, *rb;
-	bool first = true;
-
-	if (unlikely(execlists->no_priolist))
-		prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
-	/* most positive priority is scheduled first, equal priorities fifo */
-	rb = NULL;
-	parent = &execlists->queue.rb_root.rb_node;
-	while (*parent) {
-		rb = *parent;
-		p = to_priolist(rb);
-		if (prio > p->priority) {
-			parent = &rb->rb_left;
-		} else if (prio < p->priority) {
-			parent = &rb->rb_right;
-			first = false;
-		} else {
-			return p;
-		}
-	}
-
-	if (prio == I915_PRIORITY_NORMAL) {
-		p = &execlists->default_priolist;
-	} else {
-		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
-		/* Convert an allocation failure to a priority bump */
-		if (unlikely(!p)) {
-			prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
-			/* To maintain ordering with all rendering, after an
-			 * allocation failure we have to disable all scheduling.
-			 * Requests will then be executed in fifo, and schedule
-			 * will ensure that dependencies are emitted in fifo.
-			 * There will be still some reordering with existing
-			 * requests, so if userspace lied about their
-			 * dependencies that reordering may be visible.
-			 */
-			execlists->no_priolist = true;
-			goto find_priolist;
-		}
-	}
-
-	p->priority = prio;
-	INIT_LIST_HEAD(&p->requests);
-	rb_link_node(&p->node, rb, parent);
-	rb_insert_color_cached(&p->node, &execlists->queue, first);
-
-	return p;
-}
-
 static void unwind_wa_tail(struct i915_request *rq)
 {
 	rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
 
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
-	struct i915_request *rq, *rn;
-	struct i915_priolist *uninitialized_var(p);
-	int last_prio = I915_PRIORITY_INVALID;
+	struct i915_request *rq, *rn, *active = NULL;
+	struct list_head *uninitialized_var(pl);
+	int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
 
 	lockdep_assert_held(&engine->timeline.lock);
 
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 					 &engine->timeline.requests,
 					 link) {
 		if (i915_request_completed(rq))
-			return;
+			break;
 
 		__i915_request_unsubmit(rq);
 		unwind_wa_tail(rq);
 
-		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
-		if (rq_prio(rq) != last_prio) {
-			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, last_prio);
-		}
+		GEM_BUG_ON(rq->hw_context->active);
 
-		GEM_BUG_ON(p->priority != rq_prio(rq));
-		list_add(&rq->sched.link, &p->requests);
+		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+		if (rq_prio(rq) != prio) {
+			prio = rq_prio(rq);
+			pl = i915_sched_lookup_priolist(engine, prio);
+		}
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+		list_add(&rq->sched.link, pl);
+
+		active = rq;
+	}
+
+	/*
+	 * The active request is now effectively the start of a new client
+	 * stream, so give it the equivalent small priority bump to prevent
+	 * it being gazumped a second time by another peer.
+	 */
+	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
+		prio |= I915_PRIORITY_NEWCLIENT;
+		list_move_tail(&active->sched.link,
+			       i915_sched_lookup_priolist(engine, prio));
 	}
 }
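
The "new client stream" comment above captures the scheme: a low-order I915_PRIORITY_NEWCLIENT bit is OR'd into the unwound head request's priority so it re-queues just ahead of equal-priority peers, and the bit is tested first so the bump is granted only once. A toy model of flag-in-priority encoding (the shift and bit layout are illustrative, not i915's actual encoding):

#include <stdio.h>

#define PRIO_NEWCLIENT	(1 << 0)	/* illustrative low-order bump bit */
#define PRIO_SHIFT	1		/* base priority lives above it */

int main(void)
{
	int prio = 3 << PRIO_SHIFT;	/* base priority 3, no bump yet */

	if (!(prio & PRIO_NEWCLIENT))	/* grant the bump at most once */
		prio |= PRIO_NEWCLIENT;

	printf("base %d, bumped %d\n",
	       prio >> PRIO_SHIFT, !!(prio & PRIO_NEWCLIENT));
	return 0;
}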
 
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 {
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
-	unsigned long flags;
-
-	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	__unwind_incomplete_requests(engine);
-
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
 static inline void
 execlists_context_schedule_in(struct i915_request *rq)
 {
+	GEM_BUG_ON(rq->hw_context->active);
+
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(rq->engine);
+	rq->hw_context->active = rq->engine;
 }
 
 static inline void
 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 {
+	rq->hw_context->active = NULL;
 	intel_engine_context_out(rq->engine);
 	execlists_context_status_change(rq, status);
 	trace_i915_request_out(rq);
@@ -417,9 +374,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
+	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
 	struct intel_context *ce = rq->hw_context;
-	struct i915_hw_ppgtt *ppgtt =
-		rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +386,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * PML4 is allocated during ppgtt init, so this is not needed
 	 * in 48-bit mode.
 	 */
-	if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
+	if (!i915_vm_is_48bit(&ppgtt->vm))
 		execlists_update_context_pdps(ppgtt, reg_state);
 
 	/*
@@ -686,8 +642,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -706,11 +663,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * combine this request with the last, then we
 				 * are done.
 				 */
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				/*
 				 * If GVT overrides us we only ever submit
@@ -720,11 +674,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * request) to the second port.
 				 */
 				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context)) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				    ctx_single_port_submission(rq->hw_context))
 					goto done;
-				}
 
 				GEM_BUG_ON(last->hw_context == rq->hw_context);
 
@@ -735,15 +686,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
+
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -820,6 +772,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 
 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
+	const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
 	/*
 	 * After a reset, the HW starts writing into CSB entry [0]. We
 	 * therefore have to set our HEAD pointer back one entry so that
@@ -829,8 +783,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 	 * inline comparison of our cached head position against the last HW
 	 * write works even before the first interrupt.
 	 */
-	execlists->csb_head = execlists->csb_write_reset;
-	WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+	execlists->csb_head = reset_value;
+	WRITE_ONCE(*execlists->csb_write, reset_value);
 }
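
The convention is easy to model standalone: parking the head on the last
slot makes the usual "head != write" emptiness test fire as soon as the
hardware posts its first event into entry 0, with no special
first-interrupt case (GEN8_CSB_ENTRIES is 6; the consume loop below is
illustrative, not the i915 parser):

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
	unsigned int head = CSB_ENTRIES - 1;	/* the reset_value above */
	unsigned int write = CSB_ENTRIES - 1;	/* mirror of the HW pointer */

	printf("events pending after reset: %d\n", head != write);

	write = 0;	/* HW posts its first event into CSB[0] */
	while (head != write) {
		head = (head + 1) % CSB_ENTRIES;
		printf("consume CSB[%u]\n", head);
	}
	return 0;
}
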
 
 static void nop_submission_tasklet(unsigned long data)
@@ -871,27 +825,34 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
-		if (!i915_request_completed(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+			continue;
+
+		dma_fence_set_error(&rq->fence, -EIO);
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			INIT_LIST_HEAD(&rq->sched.link);
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			list_del_init(&rq->sched.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority = INT_MIN;
@@ -1093,13 +1054,7 @@ static void queue_request(struct intel_engine_cs *engine,
 			  struct i915_sched_node *node,
 			  int prio)
 {
-	list_add_tail(&node->link,
-		      &lookup_priolist(engine, prio)->requests);
-}
-
-static void __update_queue(struct intel_engine_cs *engine, int prio)
-{
-	engine->execlists.queue_priority = prio;
+	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
 }
 
 static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1118,7 +1073,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
 	if (prio > engine->execlists.queue_priority) {
-		__update_queue(engine, prio);
+		engine->execlists.queue_priority = prio;
 		__submit_queue_imm(engine);
 	}
 }
@@ -1141,139 +1096,6 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-static struct i915_request *sched_to_request(struct i915_sched_node *node)
-{
-	return container_of(node, struct i915_request, sched);
-}
-
-static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
-{
-	struct intel_engine_cs *engine = sched_to_request(node)->engine;
-
-	GEM_BUG_ON(!locked);
-
-	if (engine != locked) {
-		spin_unlock(&locked->timeline.lock);
-		spin_lock(&engine->timeline.lock);
-	}
-
-	return engine;
-}
-
-static void execlists_schedule(struct i915_request *request,
-			       const struct i915_sched_attr *attr)
-{
-	struct i915_priolist *uninitialized_var(pl);
-	struct intel_engine_cs *engine, *last;
-	struct i915_dependency *dep, *p;
-	struct i915_dependency stack;
-	const int prio = attr->priority;
-	LIST_HEAD(dfs);
-
-	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-
-	if (i915_request_completed(request))
-		return;
-
-	if (prio <= READ_ONCE(request->sched.attr.priority))
-		return;
-
-	/* Need BKL in order to use the temporary link inside i915_dependency */
-	lockdep_assert_held(&request->i915->drm.struct_mutex);
-
-	stack.signaler = &request->sched;
-	list_add(&stack.dfs_link, &dfs);
-
-	/*
-	 * Recursively bump all dependent priorities to match the new request.
-	 *
-	 * A naive approach would be to use recursion:
-	 * static void update_priorities(struct i915_sched_node *node, prio) {
-	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
-	 *		update_priorities(dep->signal, prio)
-	 *	queue_request(node);
-	 * }
-	 * but that may have unlimited recursion depth and so runs a very
-	 * real risk of overunning the kernel stack. Instead, we build
-	 * a flat list of all dependencies starting with the current request.
-	 * As we walk the list of dependencies, we add all of its dependencies
-	 * to the end of the list (this may include an already visited
-	 * request) and continue to walk onwards onto the new dependencies. The
-	 * end result is a topological list of requests in reverse order, the
-	 * last element in the list is the request we must execute first.
-	 */
-	list_for_each_entry(dep, &dfs, dfs_link) {
-		struct i915_sched_node *node = dep->signaler;
-
-		/*
-		 * Within an engine, there can be no cycle, but we may
-		 * refer to the same dependency chain multiple times
-		 * (redundant dependencies are not eliminated) and across
-		 * engines.
-		 */
-		list_for_each_entry(p, &node->signalers_list, signal_link) {
-			GEM_BUG_ON(p == dep); /* no cycles! */
-
-			if (i915_sched_node_signaled(p->signaler))
-				continue;
-
-			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
-			if (prio > READ_ONCE(p->signaler->attr.priority))
-				list_move_tail(&p->dfs_link, &dfs);
-		}
-	}
-
-	/*
-	 * If we didn't need to bump any existing priorities, and we haven't
-	 * yet submitted this request (i.e. there is no potential race with
-	 * execlists_submit_request()), we can set our own priority and skip
-	 * acquiring the engine locks.
-	 */
-	if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&request->sched.link));
-		request->sched.attr = *attr;
-		if (stack.dfs_link.next == stack.dfs_link.prev)
-			return;
-		__list_del_entry(&stack.dfs_link);
-	}
-
-	last = NULL;
-	engine = request->engine;
-	spin_lock_irq(&engine->timeline.lock);
-
-	/* Fifo and depth-first replacement ensure our deps execute before us */
-	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-		struct i915_sched_node *node = dep->signaler;
-
-		INIT_LIST_HEAD(&dep->dfs_link);
-
-		engine = sched_lock_engine(node, engine);
-
-		if (prio <= node->attr.priority)
-			continue;
-
-		node->attr.priority = prio;
-		if (!list_empty(&node->link)) {
-			if (last != engine) {
-				pl = lookup_priolist(engine, prio);
-				last = engine;
-			}
-			GEM_BUG_ON(pl->priority != prio);
-			list_move_tail(&node->link, &pl->requests);
-		}
-
-		if (prio > engine->execlists.queue_priority &&
-		    i915_sw_fence_done(&sched_to_request(node)->submit)) {
-			/* defer submission until after all of our updates */
-			__update_queue(engine, prio);
-			tasklet_hi_schedule(&engine->execlists.tasklet);
-		}
-	}
-
-	spin_unlock_irq(&engine->timeline.lock);
-}
-
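
The dependency walk that the removed comment describes survives in
i915_schedule(); a toy userspace model of the flattening, assuming a tiny
static graph and a visited flag where the kernel instead moves
re-encountered entries to the tail:

#include <stdio.h>

struct node {
	const char *name;
	struct node *signalers[2];	/* requests that must run first */
	int nsig;
	int queued;
};

int main(void)
{
	struct node a = { .name = "a" }, b = { .name = "b" },
		    c = { .name = "c" };
	struct node *dfs[8];
	int head, tail = 0, i;

	c.signalers[c.nsig++] = &b;	/* c waits on b */
	b.signalers[b.nsig++] = &a;	/* b waits on a */

	dfs[tail++] = &c;		/* the request being boosted */
	c.queued = 1;

	/* appending while iterating replaces the recursion */
	for (head = 0; head < tail; head++) {
		struct node *n = dfs[head];

		for (i = 0; i < n->nsig; i++) {
			struct node *s = n->signalers[i];

			if (!s->queued) {
				s->queued = 1;
				dfs[tail++] = s;
			}
		}
	}

	/* the list is reverse-topological, so walk it backwards: a b c */
	for (i = tail - 1; i >= 0; i--)
		printf("%s ", dfs[i]->name);
	printf("\n");
	return 0;
}
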
 static void execlists_context_destroy(struct intel_context *ce)
 {
 	GEM_BUG_ON(ce->pin_count);
@@ -1289,6 +1111,28 @@ static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine;
+
+	/*
+	 * The tasklet may still be using a pointer to our state, via an
+	 * old request. However, since we know we only unpin the context
+	 * on retirement of the following request, we know that the last
+	 * request referencing us will have had a completion CS interrupt.
+	 * If we see that it is still active, it means that the tasklet hasn't
+	 * had the chance to run yet; let it run before we tear down the
+	 * reference it may use.
+	 */
+	engine = READ_ONCE(ce->active);
+	if (unlikely(engine)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&engine->timeline.lock, flags);
+		process_csb(engine);
+		spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+		GEM_BUG_ON(READ_ONCE(ce->active));
+	}
+
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 
 	intel_ring_unpin(ce->ring);
@@ -1392,6 +1236,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+	GEM_BUG_ON(!ctx->ppgtt);
 
 	if (likely(ce->pin_count++))
 		return ce;
@@ -1571,18 +1416,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 
 	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
-	/* WaClearSlmSpaceAtContextSwitch:kbl */
-	/* Actual scratch location is at 128 bytes offset */
-	if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
-		batch = gen8_emit_pipe_control(batch,
-					       PIPE_CONTROL_FLUSH_L3 |
-					       PIPE_CONTROL_GLOBAL_GTT_IVB |
-					       PIPE_CONTROL_CS_STALL |
-					       PIPE_CONTROL_QW_WRITE,
-					       i915_scratch_offset(engine->i915)
-					       + 2 * CACHELINE_BYTES);
-	}
-
 	/* WaMediaPoolStateCmdInWABB:bxt,glk */
 	if (HAS_POOLED_EU(engine->i915)) {
 		/*
@@ -1697,7 +1530,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	unsigned int i;
 	int ret;
 
-	if (GEM_WARN_ON(engine->id != RCS))
+	if (GEM_DEBUG_WARN_ON(engine->id != RCS))
 		return -EINVAL;
 
 	switch (INTEL_GEN(engine->i915)) {
@@ -1736,8 +1569,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	 */
 	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
 		wa_bb[i]->offset = batch_ptr - batch;
-		if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
-					    CACHELINE_BYTES))) {
+		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+						  CACHELINE_BYTES))) {
 			ret = -EINVAL;
 			break;
 		}
@@ -1825,7 +1658,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
+	intel_engine_apply_whitelist(engine);
 
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1848,7 +1681,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
+	intel_engine_apply_whitelist(engine);
 
 	return 0;
 }
@@ -1922,7 +1755,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
 	unsigned long flags;
 	u32 *regs;
 
-	GEM_TRACE("%s request global=%x, current=%d\n",
+	GEM_TRACE("%s request global=%d, current=%d\n",
 		  engine->name, request ? request->global_seqno : 0,
 		  intel_engine_get_seqno(engine));
 
@@ -2049,8 +1882,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 	 * it is unsafe in case of lite-restore (because the ctx is
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
-	if (rq->gem_context->ppgtt &&
-	    (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+	if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
 	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
 	    !intel_vgpu_active(rq->i915)) {
 		ret = intel_logical_ring_emit_pdps(rq);
@@ -2129,7 +1961,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
 
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (request->engine->id == VCS)
+		if (request->engine->class == VIDEO_DECODE_CLASS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -2261,7 +2093,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ctx_workarounds_emit(rq);
+	ret = intel_engine_emit_ctx_wa(rq);
 	if (ret)
 		return ret;
 
@@ -2314,7 +2146,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
 	engine->cancel_requests = execlists_cancel_requests;
-	engine->schedule = execlists_schedule;
+	engine->schedule = i915_schedule;
 	engine->execlists.tasklet.func = execlists_submission_tasklet;
 
 	engine->reset.prepare = execlists_reset_prepare;
@@ -2402,12 +2234,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
 	logical_ring_default_irqs(engine);
 }
 
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
-	/* Older GVT emulation depends upon intercepting CSB mmio */
-	return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
 static int logical_ring_init(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -2437,24 +2263,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 			upper_32_bits(ce->lrc_desc);
 	}
 
-	execlists->csb_read =
-		i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
-	if (csb_force_mmio(i915)) {
-		execlists->csb_status = (u32 __force *)
-			(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+	execlists->csb_status =
+		&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 
-		execlists->csb_write = (u32 __force *)execlists->csb_read;
-		execlists->csb_write_reset =
-			_MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
-				      GEN8_CSB_ENTRIES - 1);
-	} else {
-		execlists->csb_status =
-			&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+	execlists->csb_write =
+		&engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
 
-		execlists->csb_write =
-			&engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
-		execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
-	}
 	reset_csb_pointers(execlists);
 
 	return 0;
@@ -2495,6 +2309,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}
 
+	intel_engine_init_whitelist(engine);
 	intel_engine_init_workarounds(engine);
 
 	return 0;
@@ -2646,7 +2461,6 @@ static void execlists_init_reg_state(u32 *regs,
 				     struct intel_ring *ring)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
 	u32 base = engine->mmio_base;
 	bool rcs = engine->class == RENDER_CLASS;
 
@@ -2718,12 +2532,12 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
 	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
+	if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
 		 */
-		ASSIGN_CTX_PML4(ppgtt, regs);
+		ASSIGN_CTX_PML4(ctx->ppgtt, regs);
 	}
 
 	if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5..96a8d95 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
 #include <drm/drm_dp_dual_mode_helper.h>
 #include "intel_drv.h"
 
+/* LSPCON OUI vendor IDs (signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define  LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define  LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define  LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
 static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
 {
 	struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
 	}
 }
 
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+	struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+	struct drm_dp_dpcd_ident *ident;
+	u32 vendor_oui;
+
+	if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+		DRM_ERROR("Can't read description\n");
+		return false;
+	}
+
+	ident = &dp->desc.ident;
+	vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+		      ident->oui[2];
+
+	switch (vendor_oui) {
+	case LSPCON_VENDOR_MCA_OUI:
+		lspcon->vendor = LSPCON_VENDOR_MCA;
+		DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+		break;
+
+	case LSPCON_VENDOR_PARADE_OUI:
+		lspcon->vendor = LSPCON_VENDOR_PARADE;
+		DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+		break;
+
+	default:
+		DRM_ERROR("Invalid/Unknown vendor OUI\n");
+		return false;
+	}
+
+	return true;
+}
+
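
The OUI assembly is plain big-endian packing of the three ident bytes; a
quick standalone check against the Parade define above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t oui[3] = { 0x00, 0x1C, 0xF8 };	/* as read from the ident */
	uint32_t vendor = (oui[0] << 16) | (oui[1] << 8) | oui[2];

	printf("0x%06X\n", vendor);	/* prints 0x001CF8, the Parade OUI */
	return 0;
}
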
 static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
 {
 	enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
 	return true;
 }
 
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+			    struct intel_crtc_state *crtc_state)
+{
+	const struct drm_display_info *info = &connector->display_info;
+	const struct drm_display_mode *adjusted_mode =
+					&crtc_state->base.adjusted_mode;
+
+	if (drm_mode_is_420_only(info, adjusted_mode) &&
+	    connector->ycbcr_420_allowed) {
+		crtc_state->port_clock /= 2;
+		crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+		crtc_state->lspcon_downsampling = true;
+	}
+}
+
 static bool lspcon_probe(struct intel_lspcon *lspcon)
 {
 	int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
 	/* Yay ... got a LSPCON device */
 	DRM_DEBUG_KMS("LSPCON detected\n");
 	lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
-	lspcon->active = true;
+
+	/*
+	 * In the SW state machine, let's put LSPCON in PCON mode only.
+	 * That way it will work with both HDMI 1.4 sinks as well as
+	 * HDMI 2.0 sinks.
+	 */
+	if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+			DRM_ERROR("LSPCON mode change to PCON failed\n");
+			return false;
+		}
+	}
 	return true;
 }
 
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
 	DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
 }
 
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+	u8 avi_if_ctrl;
+	u8 retry;
+	ssize_t ret;
+
+	/* Check if LSPCON FW is ready for data */
+	for (retry = 0; retry < 5; retry++) {
+		if (retry)
+			usleep_range(200, 300);
+
+		ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+				       &avi_if_ctrl, 1);
+		if (ret < 0) {
+			DRM_ERROR("Failed to read AVI IF control\n");
+			return false;
+		}
+
+		if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+			return true;
+	}
+
+	DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+	return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+						  uint8_t *avi_buf)
+{
+	u8 avi_if_ctrl;
+	u8 block_count = 0;
+	u8 *data;
+	uint16_t reg;
+	ssize_t ret;
+
+	while (block_count < 4) {
+		if (!lspcon_parade_fw_ready(aux)) {
+			DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
+				      block_count);
+			return false;
+		}
+
+		reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+		data = avi_buf + block_count * 8;
+		ret = drm_dp_dpcd_write(aux, reg, data, 8);
+		if (ret < 0) {
+			DRM_ERROR("Failed to write AVI IF block %d\n",
+				  block_count);
+			return false;
+		}
+
+		/*
+		 * Once a block of data is written, we have to inform the FW
+		 * about this by writing to the AVI infoframe control register:
+		 * - set the kickoff bit[7] to 1
+		 * - write the block no. to bits[1:0]
+		 */
+		reg = LSPCON_PARADE_AVI_IF_CTRL;
+		avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+		ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+		if (ret < 0) {
+			DRM_ERROR("Failed to update (0x%x), block %d\n",
+				  reg, block_count);
+			return false;
+		}
+
+		block_count++;
+	}
+
+	DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+	return true;
+}
+
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+					       const uint8_t *frame,
+					       ssize_t len)
+{
+	uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+	/*
+	 * Parade's buffer holds 32 bytes of data, written as four
+	 * 8-byte blocks:
+	 *	Token byte (first byte of the first block, must be non-zero)
+	 *	HB0 to HB2 from the AVI IF (3 byte header)
+	 *	PB0 to PB27 from the AVI IF (28 bytes of data)
+	 * So it looks like this:
+	 *	first block: | <token> <HB0-HB2> <PB0-PB3> |
+	 *	next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>|
+	 */
+
+	if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+		DRM_ERROR("Invalid length of infoframes\n");
+		return false;
+	}
+
+	memcpy(&avi_if[1], frame, len);
+
+	if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+		DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+		return false;
+	}
+
+	return true;
+}
+
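
A standalone sketch of that 32-byte framing, assuming a dummy 31-byte AVI
infoframe; the control values follow the LSPCON_PARADE_AVI_IF_* defines
above (kickoff bit 7 plus the block number):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t frame[31];		/* HB0-HB2 + PB0-PB27 */
	uint8_t buf[32] = { 1 };	/* token byte must be non-zero */
	int blk;

	memset(frame, 0xab, sizeof(frame));
	memcpy(&buf[1], frame, sizeof(frame));

	for (blk = 0; blk < 4; blk++)	/* four 8-byte DPCD writes */
		printf("block %d: buf[%2d..%2d], ctrl = 0x%02x\n",
		       blk, blk * 8, blk * 8 + 7, 0x80 | blk);
	return 0;
}
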
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+					    const uint8_t *buffer, ssize_t len)
+{
+	int ret;
+	uint32_t val = 0;
+	uint32_t retry;
+	uint16_t reg;
+	const uint8_t *data = buffer;
+
+	reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+	while (val < len) {
+		/* DPCD write for AVI IF can fail on a slow FW day, so retry */
+		for (retry = 0; retry < 5; retry++) {
+			ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+			if (ret == 1) {
+				break;
+			} else if (retry < 4) {
+				mdelay(50);
+				continue;
+			} else {
+				DRM_ERROR("DPCD write failed at: 0x%x\n", reg);
+				return false;
+			}
+		}
+		val++; reg++; data++;
+	}
+
+	val = 0;
+	reg = LSPCON_MCA_AVI_IF_CTRL;
+	ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+	if (ret < 0) {
+		DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+		return false;
+	}
+
+	/* Tell the LSPCON chip about the infoframe: clear bit 1, set bit 0 */
+	val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+	val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+	ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+	if (ret < 0) {
+		DRM_ERROR("DPCD write failed, address 0x%x\n", reg);
+		return false;
+	}
+
+	val = 0;
+	ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+	if (ret < 0) {
+		DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+		return false;
+	}
+
+	if (val == LSPCON_MCA_AVI_IF_HANDLED)
+		DRM_DEBUG_KMS("AVI IF handled by FW\n");
+
+	return true;
+}
+
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    unsigned int type,
+			    const void *frame, ssize_t len)
+{
+	bool ret;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+
+	/* LSPCON only needs AVI IF */
+	if (type != HDMI_INFOFRAME_TYPE_AVI)
+		return;
+
+	if (lspcon->vendor == LSPCON_VENDOR_MCA)
+		ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+						      frame, len);
+	else
+		ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+							 frame, len);
+
+	if (!ret) {
+		DRM_ERROR("Failed to write AVI infoframes\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+			   bool enable,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state)
+{
+	ssize_t ret;
+	union hdmi_infoframe frame;
+	uint8_t buf[VIDEO_DIP_DATA_SIZE];
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_lspcon *lspcon = &dig_port->lspcon;
+	struct intel_dp *intel_dp = &dig_port->dp;
+	struct drm_connector *connector = &intel_dp->attached_connector->base;
+	const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
+	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+
+	if (!lspcon->active) {
+		DRM_ERROR("Writing infoframes while LSPCON disabled?\n");
+		return;
+	}
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+						       mode, is_hdmi2_sink);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return;
+	}
+
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+		if (crtc_state->lspcon_downsampling)
+			frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+		else
+			frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+	} else {
+		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+	}
+
+	drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+					   crtc_state->limited_color_range ?
+					   HDMI_QUANTIZATION_RANGE_LIMITED :
+					   HDMI_QUANTIZATION_RANGE_FULL,
+					   false, is_hdmi2_sink);
+
+	ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+	if (ret < 0) {
+		DRM_ERROR("Failed to pack AVI IF\n");
+		return;
+	}
+
+	dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+				  buf, ret);
+}
+
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config)
+{
+	return enc_to_intel_lspcon(&encoder->base)->active;
+}
+
 void lspcon_resume(struct intel_lspcon *lspcon)
 {
 	enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_connector *connector = &dp->attached_connector->base;
 
 	if (!HAS_LSPCON(dev_priv)) {
 		DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 		return false;
 	}
 
-	/*
-	* In the SW state machine, lets Put LSPCON in PCON mode only.
-	* In this way, it will work with both HDMI 1.4 sinks as well as HDMI
-	* 2.0 sinks.
-	*/
-	if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
-		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
-			DRM_ERROR("LSPCON mode change to PCON failed\n");
-			return false;
-		}
-	}
-
 	if (!intel_dp_read_dpcd(dp)) {
 		DRM_ERROR("LSPCON DPCD read failed\n");
 		return false;
 	}
 
-	drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
+	if (!lspcon_detect_vendor(lspcon)) {
+		DRM_ERROR("LSPCON vendor detection failed\n");
+		return false;
+	}
 
+	connector->ycbcr_420_allowed = true;
+	lspcon->active = true;
 	DRM_DEBUG_KMS("Success: LSPCON init\n");
 	return true;
 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b08..e6c5d98 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
 #include <linux/acpi.h>
 
 /* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
-	struct intel_connector base;
-};
-
 struct intel_lvds_pps {
 	/* 100us units */
 	int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
 	struct intel_lvds_pps init_pps;
 	u32 init_lvds_val;
 
-	struct intel_lvds_connector *attached_connector;
+	struct intel_connector *attached_connector;
 };
 
 static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
 	return container_of(encoder, struct intel_lvds_encoder, base.base);
 }
 
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
-{
-	return container_of(connector, struct intel_lvds_connector, base.base);
-}
-
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
 			     i915_reg_t lvds_reg, enum pipe *pipe)
 {
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	struct intel_lvds_encoder *lvds_encoder =
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
-		&lvds_encoder->attached_connector->base;
+		lvds_encoder->attached_connector;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 		pipe_config->pipe_bpp = lvds_bpp;
 	}
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
 	/*
 	 * We have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode.  The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-	struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
 	/* use cached edid if we have one */
-	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
-		return drm_add_edid_modes(connector, lvds_connector->base.edid);
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		return drm_add_edid_modes(connector, intel_connector->edid);
 
-	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+	mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
 	if (mode == NULL)
 		return 0;
 
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	return 1;
 }
 
-/**
- * intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
-static void intel_lvds_destroy(struct drm_connector *connector)
-{
-	struct intel_lvds_connector *lvds_connector =
-		to_lvds_connector(connector);
-
-	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
-		kfree(lvds_connector->base.edid);
-
-	intel_panel_fini(&lvds_connector->base.panel);
-
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
 	.get_modes = intel_lvds_get_modes,
 	.mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_lvds_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 		return i915_modparams.lvds_channel_mode == 2;
 
 	/* single channel LVDS is limited to 112 MHz */
-	if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
-	    > 112999)
+	if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
 		return true;
 
 	if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
-	struct intel_lvds_connector *lvds_connector;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	if (!lvds_encoder)
 		return;
 
-	lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
-	if (!lvds_connector) {
+	intel_connector = intel_connector_alloc();
+	if (!intel_connector) {
 		kfree(lvds_encoder);
 		return;
 	}
 
-	if (intel_connector_init(&lvds_connector->base) < 0) {
-		kfree(lvds_connector);
-		kfree(lvds_encoder);
-		return;
-	}
-
-	lvds_encoder->attached_connector = lvds_connector;
+	lvds_encoder->attached_connector = intel_connector;
 
 	intel_encoder = &lvds_encoder->base;
 	encoder = &intel_encoder->base;
-	intel_connector = &lvds_connector->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	} else {
 		edid = ERR_PTR(-ENOENT);
 	}
-	lvds_connector->base.edid = edid;
+	intel_connector->edid = edid;
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
 	kfree(lvds_encoder);
-	kfree(lvds_connector);
+	intel_connector_free(intel_connector);
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
deleted file mode 100644
index ca44bf3..0000000
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007, 2010 Intel Corporation
- *   Jesse Barnes <jesse.barnes@intel.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <drm/drm_edid.h>
-#include <drm/drmP.h>
-#include "intel_drv.h"
-#include "i915_drv.h"
-
-/**
- * intel_connector_update_modes - update connector from edid
- * @connector: DRM connector device to use
- * @edid: previously read EDID information
- */
-int intel_connector_update_modes(struct drm_connector *connector,
-				struct edid *edid)
-{
-	int ret;
-
-	drm_connector_update_edid_property(connector, edid);
-	ret = drm_add_edid_modes(connector, edid);
-
-	return ret;
-}
-
-/**
- * intel_ddc_get_modes - get modelist from monitor
- * @connector: DRM connector device to use
- * @adapter: i2c adapter
- *
- * Fetch the EDID information from @connector using the DDC bus.
- */
-int intel_ddc_get_modes(struct drm_connector *connector,
-			struct i2c_adapter *adapter)
-{
-	struct edid *edid;
-	int ret;
-
-	edid = drm_get_edid(connector, adapter);
-	if (!edid)
-		return 0;
-
-	ret = intel_connector_update_modes(connector, edid);
-	kfree(edid);
-
-	return ret;
-}
-
-static const struct drm_prop_enum_list force_audio_names[] = {
-	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
-	{ HDMI_AUDIO_OFF, "off" },
-	{ HDMI_AUDIO_AUTO, "auto" },
-	{ HDMI_AUDIO_ON, "on" },
-};
-
-void
-intel_attach_force_audio_property(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_property *prop;
-
-	prop = dev_priv->force_audio_property;
-	if (prop == NULL) {
-		prop = drm_property_create_enum(dev, 0,
-					   "audio",
-					   force_audio_names,
-					   ARRAY_SIZE(force_audio_names));
-		if (prop == NULL)
-			return;
-
-		dev_priv->force_audio_property = prop;
-	}
-	drm_object_attach_property(&connector->base, prop, 0);
-}
-
-static const struct drm_prop_enum_list broadcast_rgb_names[] = {
-	{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
-	{ INTEL_BROADCAST_RGB_FULL, "Full" },
-	{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
-};
-
-void
-intel_attach_broadcast_rgb_property(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_property *prop;
-
-	prop = dev_priv->broadcast_rgb_property;
-	if (prop == NULL) {
-		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
-					   "Broadcast RGB",
-					   broadcast_rgb_names,
-					   ARRAY_SIZE(broadcast_rgb_names));
-		if (prop == NULL)
-			return;
-
-		dev_priv->broadcast_rgb_property = prop;
-	}
-
-	drm_object_attach_property(&connector->base, prop, 0);
-}
-
-void
-intel_attach_aspect_ratio_property(struct drm_connector *connector)
-{
-	if (!drm_mode_create_aspect_ratio_property(connector->dev))
-		drm_object_attach_property(&connector->base,
-			connector->dev->mode_config.aspect_ratio_property,
-			DRM_MODE_PICTURE_ASPECT_NONE);
-}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b41..b8f106d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
 		opregion->acpi->cadl[i] = 0;
 }
 
-void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
-	struct intel_opregion *opregion = &dev_priv->opregion;
-
-	if (!opregion->header)
-		return;
-
-	if (opregion->acpi) {
-		intel_didl_outputs(dev_priv);
-		intel_setup_cadls(dev_priv);
-
-		/* Notify BIOS we are ready to handle ACPI video ext notifs.
-		 * Right now, all the events are handled by the ACPI video module.
-		 * We don't actually need to do anything with them. */
-		opregion->acpi->csts = 0;
-		opregion->acpi->drdy = 1;
-
-		opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
-		register_acpi_notifier(&opregion->acpi_notifier);
-	}
-
-	if (opregion->asle) {
-		opregion->asle->tche = ASLE_TCHE_BLC_EN;
-		opregion->asle->ardy = ASLE_ARDY_READY;
-	}
-}
-
-void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
-	struct intel_opregion *opregion = &dev_priv->opregion;
-
-	if (!opregion->header)
-		return;
-
-	if (opregion->asle)
-		opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
-	cancel_work_sync(&dev_priv->opregion.asle_work);
-
-	if (opregion->acpi) {
-		opregion->acpi->drdy = 0;
-
-		unregister_acpi_notifier(&opregion->acpi_notifier);
-		opregion->acpi_notifier.notifier_call = NULL;
-	}
-
-	/* just clear all opregion memory pointers now */
-	memunmap(opregion->header);
-	if (opregion->rvda) {
-		memunmap(opregion->rvda);
-		opregion->rvda = NULL;
-	}
-	if (opregion->vbt_firmware) {
-		kfree(opregion->vbt_firmware);
-		opregion->vbt_firmware = NULL;
-	}
-	opregion->header = NULL;
-	opregion->acpi = NULL;
-	opregion->swsci = NULL;
-	opregion->asle = NULL;
-	opregion->vbt = NULL;
-	opregion->lid_state = NULL;
-}
-
 static void swsci_setup(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 
 	return ret - 1;
 }
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+	struct intel_opregion *opregion = &i915->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		opregion->acpi_notifier.notifier_call =
+			intel_opregion_video_event;
+		register_acpi_notifier(&opregion->acpi_notifier);
+	}
+
+	intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+	struct intel_opregion *opregion = &i915->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		intel_didl_outputs(i915);
+		intel_setup_cadls(i915);
+
+		/*
+		 * Notify BIOS we are ready to handle ACPI video ext notifs.
+		 * Right now, all the events are handled by the ACPI video
+		 * module. We don't actually need to do anything with them.
+		 */
+		opregion->acpi->csts = 0;
+		opregion->acpi->drdy = 1;
+	}
+
+	if (opregion->asle) {
+		opregion->asle->tche = ASLE_TCHE_BLC_EN;
+		opregion->asle->ardy = ASLE_ARDY_READY;
+	}
+
+	intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+	struct intel_opregion *opregion = &i915->opregion;
+
+	if (!opregion->header)
+		return;
+
+	intel_opregion_notify_adapter(i915, state);
+
+	if (opregion->asle)
+		opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+	cancel_work_sync(&i915->opregion.asle_work);
+
+	if (opregion->acpi)
+		opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+	struct intel_opregion *opregion = &i915->opregion;
+
+	intel_opregion_suspend(i915, PCI_D1);
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi_notifier.notifier_call) {
+		unregister_acpi_notifier(&opregion->acpi_notifier);
+		opregion->acpi_notifier.notifier_call = NULL;
+	}
+
+	/* just clear all opregion memory pointers now */
+	memunmap(opregion->header);
+	if (opregion->rvda) {
+		memunmap(opregion->rvda);
+		opregion->rvda = NULL;
+	}
+	if (opregion->vbt_firmware) {
+		kfree(opregion->vbt_firmware);
+		opregion->vbt_firmware = NULL;
+	}
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+	opregion->vbt = NULL;
+	opregion->lid_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8..4aa68ff 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
 #ifdef CONFIG_ACPI
 
 int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
 void intel_opregion_register(struct drm_i915_private *dev_priv);
 void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+			    pci_power_t state);
+
 void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 				  bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
 {
 }
 
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+					  pci_power_t state)
+{
+}
+
 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e4..20ea7c9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 	return err;
 }
 
-void intel_setup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
 {
 	struct intel_overlay *overlay;
 	int ret;
@@ -1387,7 +1387,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	kfree(overlay);
 }
 
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
 {
 	struct intel_overlay *overlay;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139..e6cd7b5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 	/* Native modes don't need fitting */
 	if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
 	    adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
-	    !pipe_config->ycbcr420)
+	    pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
 		goto done;
 
 	switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
 static u32 vlv_get_backlight(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	enum pipe pipe = intel_connector_get_pipe(connector);
 
 	return _vlv_get_backlight(dev_priv, pipe);
 }
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
 	struct intel_panel *panel = &connector->panel;
 
 	/* Disable the backlight */
-	pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+	intel_panel_actually_set_backlight(old_conn_state, 0);
 	usleep_range(2000, 3000);
 	pwm_disable(panel->backlight.pwm);
 }
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 	return 0;
 }
 
-void intel_panel_destroy_backlight(struct drm_connector *connector)
+static void intel_panel_destroy_backlight(struct intel_panel *panel)
 {
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_panel *panel = &intel_connector->panel;
-
 	/* dispose of the pwm */
 	if (panel->backlight.pwm)
 		pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
 	struct intel_connector *intel_connector =
 		container_of(panel, struct intel_connector, panel);
 
+	intel_panel_destroy_backlight(panel);
+
 	if (panel->fixed_mode)
 		drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3fe358d..a26b4ed 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3198,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 	 * and after the vblank.
 	 */
 	*a = newstate->wm.ilk.optimal;
-	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+	    intel_state->skip_intermediate_wm)
 		return 0;
 
 	a->pipe_enabled |= b->pipe_enabled;
@@ -3650,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
-	    IS_CANNONLAKE(dev_priv))
-		return true;
-
-	if (IS_SKYLAKE(dev_priv) &&
-	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
-		return true;
-
-	return false;
+	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 }
 
 /*
@@ -3822,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 			      const struct intel_crtc_state *cstate,
-			      const unsigned int total_data_rate,
+			      const u64 total_data_rate,
 			      const int num_active,
 			      struct skl_ddb_allocation *ddb)
 {
@@ -3836,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
 	adjusted_mode = &cstate->base.adjusted_mode;
-	total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
 	/*
 	 * 12GB/s is maximum BW supported by single DBuf slice.
 	 */
-	if (total_data_bw >= GBps(12) || num_active > 1) {
+	if (num_active > 1 || total_data_bw >= GBps(12)) {
 		ddb->enabled_slices = 2;
 	} else {
 		ddb->enabled_slices = 1;
@@ -3852,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 }
 
 static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *cstate,
-				   const unsigned int total_data_rate,
+				   const u64 total_data_rate,
 				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *for_crtc = cstate->base.crtc;
 	const struct drm_crtc_state *crtc_state;
 	const struct drm_crtc *crtc;
@@ -3958,73 +3951,68 @@ static void
 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 			   const enum pipe pipe,
 			   const enum plane_id plane_id,
-			   struct skl_ddb_allocation *ddb /* out */)
+			   struct skl_ddb_entry *ddb_y,
+			   struct skl_ddb_entry *ddb_uv)
 {
-	u32 val, val2 = 0;
-	int fourcc, pixel_format;
+	u32 val, val2;
+	u32 fourcc = 0;
 
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 		return;
 	}
 
 	val = I915_READ(PLANE_CTL(pipe, plane_id));
 
 	/* No DDB allocated for disabled planes */
-	if (!(val & PLANE_CTL_ENABLE))
-		return;
+	if (val & PLANE_CTL_ENABLE)
+		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+					      val & PLANE_CTL_ORDER_RGBX,
+					      val & PLANE_CTL_ALPHA_MASK);
 
-	pixel_format = val & PLANE_CTL_FORMAT_MASK;
-	fourcc = skl_format_to_fourcc(pixel_format,
-				      val & PLANE_CTL_ORDER_RGBX,
-				      val & PLANE_CTL_ALPHA_MASK);
-
-	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-	/*
-	 * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
-	 * registers for now.
-	 */
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(dev_priv) >= 11) {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+	} else {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-	if (fourcc == DRM_FORMAT_NV12) {
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val2);
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->uv_plane[pipe][plane_id], val);
-	} else {
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		if (fourcc == DRM_FORMAT_NV12)
+			swap(val, val2);
+
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
 	}
 }
 
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum pipe pipe = crtc->pipe;
+	enum plane_id plane_id;
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return;
+
+	for_each_plane_id_on_crtc(crtc, plane_id)
+		skl_ddb_get_hw_plane_state(dev_priv, pipe,
+					   plane_id,
+					   &ddb_y[plane_id],
+					   &ddb_uv[plane_id]);
+
+	intel_display_power_put(dev_priv, power_domain);
+}
+
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */)
 {
-	struct intel_crtc *crtc;
-
-	memset(ddb, 0, sizeof(*ddb));
-
 	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		enum intel_display_power_domain power_domain;
-		enum plane_id plane_id;
-		enum pipe pipe = crtc->pipe;
-
-		power_domain = POWER_DOMAIN_PIPE(pipe);
-		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
-			continue;
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			skl_ddb_get_hw_plane_state(dev_priv, pipe,
-						   plane_id, ddb);
-
-		intel_display_power_put(dev_priv, power_domain);
-	}
 }
 
 /*
@@ -4177,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 	return 0;
 }
 
-static unsigned int
+static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
-			     const struct drm_plane_state *pstate,
+			     const struct intel_plane_state *intel_pstate,
 			     const int plane)
 {
-	struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
-	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+	struct intel_plane *intel_plane =
+		to_intel_plane(intel_pstate->base.plane);
 	uint32_t data_rate;
 	uint32_t width = 0, height = 0;
 	struct drm_framebuffer *fb;
 	u32 format;
 	uint_fixed_16_16_t down_scale_amount;
+	u64 rate;
 
 	if (!intel_pstate->base.visible)
 		return 0;
 
-	fb = pstate->fb;
+	fb = intel_pstate->base.fb;
 	format = fb->format->format;
 
 	if (intel_plane->id == PLANE_CURSOR)
@@ -4215,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 		height /= 2;
 	}
 
-	data_rate = width * height * fb->format->cpp[plane];
+	data_rate = width * height;
 
 	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
 
-	return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+	rate *= fb->format->cpp[plane];
+	return rate;
 }
 
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- *   3 * 4096 * 8192  * 4 < 2^32
- */
-static unsigned int
+static u64
 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
-				 unsigned int *plane_data_rate,
-				 unsigned int *uv_plane_data_rate)
+				 u64 *plane_data_rate,
+				 u64 *uv_plane_data_rate)
 {
 	struct drm_crtc_state *cstate = &intel_cstate->base;
 	struct drm_atomic_state *state = cstate->state;
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
-	unsigned int total_data_rate = 0;
+	u64 total_data_rate = 0;
 
 	if (WARN_ON(!state))
 		return 0;
@@ -4244,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 	/* Calculate and cache data rate for each plane */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
-		unsigned int rate;
+		u64 rate;
+		const struct intel_plane_state *intel_pstate =
+			to_intel_plane_state(pstate);
 
 		/* packed/y */
 		rate = skl_plane_relative_data_rate(intel_cstate,
-						    pstate, 0);
+						    intel_pstate, 0);
 		plane_data_rate[plane_id] = rate;
-
 		total_data_rate += rate;
 
 		/* uv-plane */
 		rate = skl_plane_relative_data_rate(intel_cstate,
-						    pstate, 1);
+						    intel_pstate, 1);
 		uv_plane_data_rate[plane_id] = rate;
-
 		total_data_rate += rate;
 	}
 
 	return total_data_rate;
 }
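
The widening to u64 matters because the removed comment's 32-bit bound (three 8192x4096@32bpp planes) no longer covers the worst case once more planes and the downscale factor are in play. Illustrative arithmetic, with assumed worst-case numbers:

	/* Assumed worst case, for illustration only: seven 8192x4096 planes
	 * (as on gen11) at 4 bytes per pixel, each downscaled 8x. */
	u64 rate = 7ULL * 8192 * 4096 * 4;	/* 939,524,096: fits in u32 */
	rate *= 8;				/* 7,516,192,768: overflows u32 */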
 
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+				 u64 *plane_data_rate)
+{
+	struct drm_crtc_state *cstate = &intel_cstate->base;
+	struct drm_atomic_state *state = cstate->state;
+	struct drm_plane *plane;
+	const struct drm_plane_state *pstate;
+	u64 total_data_rate = 0;
+
+	if (WARN_ON(!state))
+		return 0;
+
+	/* Calculate and cache data rate for each plane */
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+		const struct intel_plane_state *intel_pstate =
+			to_intel_plane_state(pstate);
+		enum plane_id plane_id = to_intel_plane(plane)->id;
+		u64 rate;
+
+		if (!intel_pstate->linked_plane) {
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 0);
+			plane_data_rate[plane_id] = rate;
+			total_data_rate += rate;
+		} else {
+			enum plane_id y_plane_id;
+
+			/*
+			 * The slave plane might not be iterated by
+			 * drm_atomic_crtc_state_for_each_plane_state(),
+			 * and it needs the master plane state, which may
+			 * be NULL if we try get_new_plane_state(), so we
+			 * always calculate from the master.
+			 */
+			if (intel_pstate->slave)
+				continue;
+
+			/* Y plane rate is calculated on the slave */
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 0);
+			y_plane_id = intel_pstate->linked_plane->id;
+			plane_data_rate[y_plane_id] = rate;
+			total_data_rate += rate;
+
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 1);
+			plane_data_rate[plane_id] = rate;
+			total_data_rate += rate;
+		}
+	}
+
+	return total_data_rate;
+}
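
On gen11 a planar framebuffer is scanned out by two linked planes, with the master handling the UV surface (color plane 1) and its linked slave handling Y (color plane 0), which is why the two rates land in different slots above. Schematically:

	/* Gen11 planar rate bookkeeping, as done by the loop above:
	 *   plane_data_rate[linked_plane->id] = Y rate  (color plane 0)
	 *   plane_data_rate[plane_id]         = UV rate (color plane 1)
	 * Non-planar planes keep their single color plane 0 rate. */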
+
 static uint16_t
 skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
 {
@@ -4336,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
 
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
+		struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
 
-		if (!pstate->visible)
+		/* slave plane must be invisible and is calculated from the master */
+		if (!pstate->visible || WARN_ON(plane_state->slave))
 			continue;
 
-		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-		uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		if (!plane_state->linked_plane) {
+			minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+			uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		} else {
+			enum plane_id y_plane_id =
+				plane_state->linked_plane->id;
+
+			minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+			minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		}
 	}
 
 	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4356,23 +4408,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct drm_crtc *crtc = cstate->base.crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
 	uint16_t alloc_size, start;
 	uint16_t minimum[I915_MAX_PLANES] = {};
 	uint16_t uv_minimum[I915_MAX_PLANES] = {};
-	unsigned int total_data_rate;
+	u64 total_data_rate;
 	enum plane_id plane_id;
 	int num_active;
-	unsigned int plane_data_rate[I915_MAX_PLANES] = {};
-	unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+	u64 plane_data_rate[I915_MAX_PLANES] = {};
+	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
 	uint16_t total_min_blocks = 0;
 
 	/* Clear the partitioning for disabled planes. */
-	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+	memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+	memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
 
 	if (WARN_ON(!state))
 		return 0;
@@ -4382,11 +4433,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 	}
 
-	total_data_rate = skl_get_total_relative_data_rate(cstate,
-							   plane_data_rate,
-							   uv_plane_data_rate);
-	skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
-					   alloc, &num_active);
+	if (INTEL_GEN(dev_priv) < 11)
+		total_data_rate =
+			skl_get_total_relative_data_rate(cstate,
+							 plane_data_rate,
+							 uv_plane_data_rate);
+	else
+		total_data_rate =
+			icl_get_total_relative_data_rate(cstate,
+							 plane_data_rate);
+
+	skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+					   ddb, alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -4412,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	}
 
 	alloc_size -= total_min_blocks;
-	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
-	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
 
 	/*
 	 * 2. Distribute the remaining space in proportion to the amount of
@@ -4426,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 	start = alloc->start;
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		unsigned int data_rate, uv_data_rate;
+		u64 data_rate, uv_data_rate;
 		uint16_t plane_blocks, uv_plane_blocks;
 
 		if (plane_id == PLANE_CURSOR)
@@ -4440,13 +4498,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		 * result is < available as data_rate / total_data_rate < 1
 		 */
 		plane_blocks = minimum[plane_id];
-		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
-					total_data_rate);
+		plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
 
 		/* Leave disabled planes at (0,0) */
 		if (data_rate) {
-			ddb->plane[pipe][plane_id].start = start;
-			ddb->plane[pipe][plane_id].end = start + plane_blocks;
+			cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
 		}
 
 		start += plane_blocks;
@@ -4455,12 +4512,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		uv_data_rate = uv_plane_data_rate[plane_id];
 
 		uv_plane_blocks = uv_minimum[plane_id];
-		uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
-					   total_data_rate);
+		uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+		/* Gen11+ uses a separate plane for UV watermarks */
+		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
 		if (uv_data_rate) {
-			ddb->uv_plane[pipe][plane_id].start = start;
-			ddb->uv_plane[pipe][plane_id].end =
+			cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_uv[plane_id].end =
 				start + uv_plane_blocks;
 		}
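
The proportional split in step 2 is plain integer arithmetic; a worked example with assumed numbers:

	/* Assumed: 400 blocks remain after minimums, three planes with data
	 * rates in a 2:1:1 ratio (total_data_rate = 4):
	 *   plane A: minimum[A] + 400 * 2 / 4 = minimum[A] + 200 blocks
	 *   plane B: minimum[B] + 400 * 1 / 4 = minimum[B] + 100 blocks
	 *   plane C: minimum[C] + 400 * 1 / 4 = minimum[C] + 100 blocks
	 * Each share stays within alloc_size since data_rate/total_data_rate
	 * is always < 1. */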
 
@@ -4514,7 +4573,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 }
 
 static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
 {
 	uint32_t pixel_rate;
 	uint32_t crtc_htotal;
@@ -4557,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 }
 
 static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-			    struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
 			    const struct intel_plane_state *intel_pstate,
-			    struct skl_wm_params *wp, int plane_id)
+			    struct skl_wm_params *wp, int color_plane)
 {
 	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	const struct drm_framebuffer *fb = pstate->fb;
 	uint32_t interm_pbpl;
@@ -4570,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		to_intel_atomic_state(cstate->base.state);
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 
-	if (!intel_wm_plane_visible(cstate, intel_pstate))
-		return 0;
-
 	/* only NV12 format has two planes */
-	if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+	if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
 		DRM_DEBUG_KMS("Non NV12 format have single plane\n");
 		return -EINVAL;
 	}
@@ -4599,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	}
 
-	if (plane_id == 1 && wp->is_planar)
+	if (color_plane == 1 && wp->is_planar)
 		wp->width /= 2;
 
-	wp->cpp = fb->format->cpp[plane_id];
+	wp->cpp = fb->format->cpp[color_plane];
 	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
 							     intel_pstate);
 
@@ -4664,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-				struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 				const struct intel_plane_state *intel_pstate,
 				uint16_t ddb_allocation,
 				int level,
@@ -4673,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				const struct skl_wm_level *result_prev,
 				struct skl_wm_level *result /* out */)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
@@ -4683,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 	uint32_t min_disp_buf_needed;
 
-	if (latency == 0 ||
-	    !intel_wm_plane_visible(cstate, intel_pstate)) {
-		result->plane_en = false;
-		return 0;
-	}
+	if (latency == 0)
+		return level == 0 ? -EINVAL : 0;
 
 	/* Display WA #1141: kbl,cfl */
 	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4710,15 +4764,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	} else {
 		if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
 		     wp->dbuf_block_size < 1) &&
-		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
 			selected_result = method2;
-		else if (ddb_allocation >=
-			 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
-			selected_result = min_fixed16(method1, method2);
-		else if (latency >= wp->linetime_us)
-			selected_result = min_fixed16(method1, method2);
-		else
+		} else if (ddb_allocation >=
+			 fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+			if (IS_GEN9(dev_priv) &&
+			    !IS_GEMINILAKE(dev_priv))
+				selected_result = min_fixed16(method1, method2);
+			else
+				selected_result = method2;
+		} else if (latency >= wp->linetime_us) {
+			if (IS_GEN9(dev_priv) &&
+			    !IS_GEMINILAKE(dev_priv))
+				selected_result = min_fixed16(method1, method2);
+			else
+				selected_result = method2;
+		} else {
 			selected_result = method1;
+		}
 	}
 
 	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4775,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((level > 0 && res_lines > 31) ||
 	    res_blocks >= ddb_allocation ||
 	    min_disp_buf_needed >= ddb_allocation) {
-		result->plane_en = false;
-
 		/*
 		 * If there are no valid level 0 watermarks, then we can't
 		 * support this display configuration.
@@ -4794,17 +4855,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		}
 	}
 
-	/*
-	 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
-	 * disable wm level 1-7 on NV12 planes
-	 */
-	if (wp->is_planar && level >= 1 &&
-	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
-	     IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
-		result->plane_en = false;
-		return 0;
-	}
-
 	/* The number of lines are ignored for the level 0 watermark. */
 	result->plane_res_b = res_blocks;
 	result->plane_res_l = res_lines;
@@ -4814,43 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 }
 
 static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
-		      struct skl_ddb_allocation *ddb,
-		      struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
 		      const struct intel_plane_state *intel_pstate,
+		      uint16_t ddb_blocks,
 		      const struct skl_wm_params *wm_params,
-		      struct skl_plane_wm *wm,
-		      int plane_id)
+		      struct skl_wm_level *levels)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-	struct drm_plane *plane = intel_pstate->base.plane;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
-	uint16_t ddb_blocks;
-	enum pipe pipe = intel_crtc->pipe;
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum plane_id intel_plane_id = intel_plane->id;
+	struct skl_wm_level *result_prev = &levels[0];
 	int ret;
 
-	if (WARN_ON(!intel_pstate->base.fb))
-		return -EINVAL;
-
-	ddb_blocks = plane_id ?
-		     skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
-		     skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
 	for (level = 0; level <= max_level; level++) {
-		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
-							  &wm->wm[level];
-		struct skl_wm_level *result_prev;
+		struct skl_wm_level *result = &levels[level];
 
-		if (level)
-			result_prev = plane_id ? &wm->uv_wm[level - 1] :
-						  &wm->wm[level - 1];
-		else
-			result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
-
-		ret = skl_compute_plane_wm(dev_priv,
-					   cstate,
+		ret = skl_compute_plane_wm(cstate,
 					   intel_pstate,
 					   ddb_blocks,
 					   level,
@@ -4859,16 +4888,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 					   result);
 		if (ret)
 			return ret;
-	}
 
-	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
-		wm->is_planar = true;
+		result_prev = result;
+	}
 
 	return 0;
 }
 
 static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4890,42 +4918,50 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
 	return linetime_wm;
 }
 
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
-				      struct skl_wm_params *wp,
-				      struct skl_wm_level *wm_l0,
-				      uint16_t ddb_allocation,
-				      struct skl_wm_level *trans_wm /* out */)
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+				      const struct skl_wm_params *wp,
+				      struct skl_plane_wm *wm,
+				      uint16_t ddb_allocation)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
 	uint16_t trans_min, trans_y_tile_min;
 	const uint16_t trans_amount = 10; /* This is configurable amount */
-	uint16_t trans_offset_b, res_blocks;
-
-	if (!cstate->base.active)
-		goto exit;
+	uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
 	/* Transition WM are not recommended by HW team for GEN9 */
 	if (INTEL_GEN(dev_priv) <= 9)
-		goto exit;
+		return;
 
 	/* Transition WM don't make any sense if ipc is disabled */
 	if (!dev_priv->ipc_enabled)
-		goto exit;
+		return;
 
-	trans_min = 0;
-	if (INTEL_GEN(dev_priv) >= 10)
+	trans_min = 14;
+	if (INTEL_GEN(dev_priv) >= 11)
 		trans_min = 4;
 
 	trans_offset_b = trans_min + trans_amount;
 
+	/*
+	 * The spec asks for Selected Result Blocks for wm0 (the real value),
+	 * not Result Blocks (the integer value). Pay attention to the capital
+	 * letters. The value wm->wm[0].plane_res_b is actually Result Blocks,
+	 * but since Result Blocks is the ceiling of Selected Result Blocks
+	 * plus 1, and since we will later have to take the ceiling of the sum
+	 * in the transition watermark calculation, we can just pretend
+	 * Selected Result Blocks is Result Blocks minus 1 and it should work
+	 * for the current platforms.
+	 */
+	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+
 	if (wp->y_tiled) {
 		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
 							wp->y_tile_minimum);
-		res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
 				trans_offset_b;
 	} else {
-		res_blocks = wm_l0->plane_res_b + trans_offset_b;
+		res_blocks = wm0_sel_res_b + trans_offset_b;
 
 		/* WA BUG:1938466 add one block for non y-tile planes */
 		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4936,25 +4972,132 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
 	res_blocks += 1;
 
 	if (res_blocks < ddb_allocation) {
-		trans_wm->plane_res_b = res_blocks;
-		trans_wm->plane_en = true;
-		return;
+		wm->trans_wm.plane_res_b = res_blocks;
+		wm->trans_wm.plane_en = true;
+	}
+}
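
Combining the Selected Result Blocks fudge with the new trans_min values, a worked example with assumed numbers:

	/* Assumed: gen11, IPC enabled, non y-tiled, wm0 Result Blocks = 31:
	 *   wm0_sel_res_b  = 31 - 1 = 30
	 *   trans_offset_b = trans_min(4) + trans_amount(10) = 14
	 *   res_blocks     = 30 + 14 = 44, then +1 for the ceiling = 45
	 * The transition watermark is programmed only if 45 < ddb_allocation.
	 */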
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+				     const struct intel_plane_state *plane_state,
+				     enum plane_id plane_id, int color_plane)
+{
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
+	struct skl_wm_params wm_params;
+	int ret;
+
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+					  &wm_params, color_plane);
+	if (ret)
+		return ret;
+
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->wm);
+	if (ret)
+		return ret;
+
+	skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+
+	return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+				 const struct intel_plane_state *plane_state,
+				 enum plane_id plane_id)
+{
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
+	struct skl_wm_params wm_params;
+	int ret;
+
+	wm->is_planar = true;
+
+	/* uv plane watermarks must also be validated for NV12/Planar */
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+					  &wm_params, 1);
+	if (ret)
+		return ret;
+
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->uv_wm);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum plane_id plane_id = plane->id;
+	int ret;
+
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
+		return 0;
+
+	ret = skl_build_plane_wm_single(crtc_state, plane_state,
+					plane_id, 0);
+	if (ret)
+		return ret;
+
+	if (fb->format->is_yuv && fb->format->num_planes > 1) {
+		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+					    plane_id);
+		if (ret)
+			return ret;
 	}
 
-exit:
-	trans_wm->plane_en = false;
+	return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
+{
+	enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+	int ret;
+
+	/* Watermarks for slave planes are calculated via the master */
+	if (plane_state->slave)
+		return 0;
+
+	if (plane_state->linked_plane) {
+		const struct drm_framebuffer *fb = plane_state->base.fb;
+		enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+		WARN_ON(!fb->format->is_yuv ||
+			fb->format->num_planes == 1);
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						y_plane_id, 0);
+		if (ret)
+			return ret;
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 1);
+		if (ret)
+			return ret;
+	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
-			     struct skl_ddb_allocation *ddb,
 			     struct skl_pipe_wm *pipe_wm)
 {
-	struct drm_device *dev = cstate->base.crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
 	struct drm_crtc_state *crtc_state = &cstate->base;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
-	struct skl_plane_wm *wm;
 	int ret;
 
 	/*
@@ -4966,44 +5109,15 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		const struct intel_plane_state *intel_pstate =
 						to_intel_plane_state(pstate);
-		enum plane_id plane_id = to_intel_plane(plane)->id;
-		struct skl_wm_params wm_params;
-		enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
-		uint16_t ddb_blocks;
 
-		wm = &pipe_wm->planes[plane_id];
-		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
-
-		ret = skl_compute_plane_wm_params(dev_priv, cstate,
-						  intel_pstate, &wm_params, 0);
+		if (INTEL_GEN(dev_priv) >= 11)
+			ret = icl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
+		else
+			ret = skl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
 		if (ret)
 			return ret;
-
-		ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-					    intel_pstate, &wm_params, wm, 0);
-		if (ret)
-			return ret;
-
-		skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-					  ddb_blocks, &wm->trans_wm);
-
-		/* uv plane watermarks must also be validated for NV12/Planar */
-		if (wm_params.is_planar) {
-			memset(&wm_params, 0, sizeof(struct skl_wm_params));
-			wm->is_planar = true;
-
-			ret = skl_compute_plane_wm_params(dev_priv, cstate,
-							  intel_pstate,
-							  &wm_params, 1);
-			if (ret)
-				return ret;
-
-			ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-						    intel_pstate, &wm_params,
-						    wm, 1);
-			if (ret)
-				return ret;
-		}
 	}
 
 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -5016,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 				const struct skl_ddb_entry *entry)
 {
 	if (entry->end)
-		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
 	else
-		I915_WRITE(reg, 0);
+		I915_WRITE_FW(reg, 0);
 }
 
 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5033,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
 	}
 
-	I915_WRITE(reg, val);
+	I915_WRITE_FW(reg, val);
 }
 
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			       const struct skl_plane_wm *wm,
-			       const struct skl_ddb_allocation *ddb,
-			       enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb_y =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
+	const struct skl_ddb_entry *ddb_uv =
+		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5054,35 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-			    &ddb->plane[pipe][plane_id]);
-	/* FIXME: add proper NV12 support for ICL. */
-	if (INTEL_GEN(dev_priv) >= 11)
-		return skl_ddb_entry_write(dev_priv,
-					   PLANE_BUF_CFG(pipe, plane_id),
-					   &ddb->plane[pipe][plane_id]);
-	if (wm->is_planar) {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->uv_plane[pipe][plane_id]);
+	if (INTEL_GEN(dev_priv) >= 11) {
 		skl_ddb_entry_write(dev_priv,
-				    PLANE_NV12_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-	} else {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-		I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+		return;
 	}
+
+	if (wm->is_planar)
+		swap(ddb_y, ddb_uv);
+
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
 }
 
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-				const struct skl_plane_wm *wm,
-				const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5090,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
 	}
 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-			    &ddb->plane[pipe][PLANE_CURSOR]);
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
 }
 
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2)
 {
-	if (l1->plane_en != l2->plane_en)
-		return false;
+	return l1->plane_en == l2->plane_en &&
+		l1->plane_res_l == l2->plane_res_l &&
+		l1->plane_res_b == l2->plane_res_b;
+}
 
-	/* If both planes aren't enabled, the rest shouldn't matter */
-	if (!l1->plane_en)
-		return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+				const struct skl_plane_wm *wm1,
+				const struct skl_plane_wm *wm2)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
-	return (l1->plane_res_l == l2->plane_res_l &&
-		l1->plane_res_b == l2->plane_res_b);
+	for (level = 0; level <= max_level; level++) {
+		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+			return false;
+	}
+
+	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5114,16 +5236,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 	return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-				 const struct skl_ddb_entry **entries,
-				 const struct skl_ddb_entry *ddb,
-				 int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+				 const struct skl_ddb_entry entries[],
+				 int num_entries, int ignore_idx)
 {
-	enum pipe pipe;
+	int i;
 
-	for_each_pipe(dev_priv, pipe) {
-		if (pipe != ignore && entries[pipe] &&
-		    skl_ddb_entries_overlap(ddb, entries[pipe]))
+	for (i = 0; i < num_entries; i++) {
+		if (i != ignore_idx &&
+		    skl_ddb_entries_overlap(ddb, &entries[i]))
 			return true;
 	}
 
@@ -5133,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 			      const struct skl_pipe_wm *old_pipe_wm,
 			      struct skl_pipe_wm *pipe_wm, /* out */
-			      struct skl_ddb_allocation *ddb, /* out */
 			      bool *changed /* out */)
 {
 	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
 	int ret;
 
-	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+	ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
 	if (ret)
 		return ret;
 
@@ -5165,32 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
 }
 
 static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+			    struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
-	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane_state *plane_state;
-	struct drm_plane *plane;
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane;
 
-	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
-		enum plane_id plane_id = to_intel_plane(plane)->id;
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
 
-		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
-					&new_ddb->plane[pipe][plane_id]) &&
-		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
-					&new_ddb->uv_plane[pipe][plane_id]))
+		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
 			continue;
 
-		plane_state = drm_atomic_get_plane_state(state, plane);
+		plane_state = intel_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state))
 			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane_id);
 	}
 
 	return 0;
@@ -5202,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	const struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+	struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
 	int ret, i;
 
 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
-		ret = skl_allocate_pipe_ddb(cstate, ddb);
+	for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
 		if (ret)
 			return ret;
 
-		ret = skl_ddb_add_affected_planes(cstate);
+		ret = skl_ddb_add_affected_planes(old_crtc_state,
+						  new_crtc_state);
 		if (ret)
 			return ret;
 	}
@@ -5222,38 +5342,31 @@ skl_compute_ddb(struct drm_atomic_state *state)
 }
 
 static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
 {
-	const struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(state);
-	const struct drm_crtc *crtc;
-	const struct drm_crtc_state *cstate;
-	const struct intel_plane *intel_plane;
-	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
-	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state;
+	const struct intel_crtc_state *new_crtc_state;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
 	int i;
 
-	for_each_new_crtc_in_state(state, crtc, cstate, i) {
-		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum pipe pipe = intel_crtc->pipe;
-
-		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-			enum plane_id plane_id = intel_plane->id;
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
 			const struct skl_ddb_entry *old, *new;
 
-			old = &old_ddb->plane[pipe][plane_id];
-			new = &new_ddb->plane[pipe][plane_id];
+			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
-			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-					 intel_plane->base.base.id,
-					 intel_plane->base.name,
-					 old->start, old->end,
-					 new->start, new->end);
+			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+				      plane->base.base.id, plane->base.name,
+				      old->start, old->end,
+				      new->start, new->end);
 		}
 	}
 }
@@ -5348,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
 	return 0;
 }
 
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state, the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane *plane;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
+
+		/*
+		 * Force a full wm update for every plane on modeset.
+		 * Required because the reset value of the wm registers
+		 * is non-zero, whereas we want all disabled planes to
+		 * have zero watermarks. So if we turn off the relevant
+		 * power well the hardware state will go out of sync
+		 * with the software state.
+		 */
+		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+		    skl_plane_wm_equals(dev_priv,
+					&old_crtc_state->wm.skl.optimal.planes[plane_id],
+					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
+			continue;
+
+		plane_state = intel_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane_id);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -5387,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
 			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
 
 		pipe_wm = &intel_cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
-					 &results->ddb, &changed);
+		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+		if (ret)
+			return ret;
+
+		ret = skl_wm_add_affected_planes(intel_state,
+						 to_intel_crtc(crtc));
 		if (ret)
 			return ret;
 
@@ -5402,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 		intel_cstate->update_wm_pre = true;
 	}
 
-	skl_print_wm_changes(state);
+	skl_print_wm_changes(intel_state);
 
 	return 0;
 }
@@ -5413,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
 	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
 	enum pipe pipe = crtc->pipe;
-	enum plane_id plane_id;
 
 	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
 		return;
 
 	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (plane_id != PLANE_CURSOR)
-			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
-					   ddb, plane_id);
-		else
-			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
-					    ddb);
-	}
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5439,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_values *results = &state->wm_results;
-	struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
-	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
 		return;
@@ -5450,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	if (cstate->base.active_changed)
 		skl_atomic_update_crtc_wm(state, cstate);
 
-	memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
-	       sizeof(hw_vals->ddb.uv_plane[pipe]));
-	memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
-	       sizeof(hw_vals->ddb.plane[pipe]));
-
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
@@ -5605,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 	if (dev_priv->active_crtcs) {
 		/* Fully recompute DDB on first atomic commit */
 		dev_priv->wm.distrust_bios_wm = true;
-	} else {
-		/*
-		 * Easy/common case; just sanitize DDB now if everything off
-		 * Keep dbuf slice info intact
-		 */
-		memset(ddb->plane, 0, sizeof(ddb->plane));
-		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
 	}
 }
 
@@ -6155,14 +6307,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	/* Display WA #0477 WaDisableIPC: skl */
-	if (IS_SKYLAKE(dev_priv))
-		dev_priv->ipc_enabled = false;
-
-	/* Display WA #1141: SKL:all KBL:all CFL */
-	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
-	    !dev_priv->dram_info.symmetric_memory)
-		dev_priv->ipc_enabled = false;
+	if (!HAS_IPC(dev_priv))
+		return;
 
 	val = I915_READ(DISP_ARB_CTL2);
 
@@ -6176,11 +6322,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 
 void intel_init_ipc(struct drm_i915_private *dev_priv)
 {
-	dev_priv->ipc_enabled = false;
 	if (!HAS_IPC(dev_priv))
 		return;
 
-	dev_priv->ipc_enabled = true;
+	/* Display WA #1141: SKL:all KBL:all CFL */
+	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+		dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+	else
+		dev_priv->ipc_enabled = true;
+
 	intel_enable_ipc(dev_priv);
 }
 
@@ -8774,6 +8924,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 	/* This is not an Wa. Enable to reduce Sampler power */
 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+	/* WaEnable32PlaneMode:icl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9351,8 +9505,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-	intel_fbc_init(dev_priv);
-
 	/* For cxsr */
 	if (IS_PINEVIEW(dev_priv))
 		i915_pineview_get_mem_freq(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b5..419e563 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,14 @@ static bool psr_global_enabled(u32 debug)
 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 			       const struct intel_crtc_state *crtc_state)
 {
+	/* Disable PSR2 by default for all platforms */
+	if (i915_modparams.enable_psr == -1)
+		return false;
+
+	/* Cannot enable DSC and PSR2 simultaneously */
+	WARN_ON(crtc_state->dsc_params.compression_enable &&
+		crtc_state->has_psr2);
+
 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
@@ -79,25 +87,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 	}
 }
 
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+	switch (cpu_transcoder) {
+	case TRANSCODER_A:
+		return EDP_PSR_TRANSCODER_A_SHIFT;
+	case TRANSCODER_B:
+		return EDP_PSR_TRANSCODER_B_SHIFT;
+	case TRANSCODER_C:
+		return EDP_PSR_TRANSCODER_C_SHIFT;
+	default:
+		MISSING_CASE(cpu_transcoder);
+		/* fallthrough */
+	case TRANSCODER_EDP:
+		return EDP_PSR_TRANSCODER_EDP_SHIFT;
+	}
+}
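
With the shift helper in place, the per-transcoder IIR bits are built generically rather than via hardcoded per-transcoder terms; the pattern, as used by the loops below:

	/* Illustrative: build the PSR error mask for all transcoders. */
	u32 mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders)
		mask |= EDP_PSR_ERROR(edp_psr_shift(cpu_transcoder));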
+
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
 {
 	u32 debug_mask, mask;
+	enum transcoder cpu_transcoder;
+	u32 transcoders = BIT(TRANSCODER_EDP);
 
-	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
-	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
-		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
+	if (INTEL_GEN(dev_priv) >= 8)
+		transcoders |= BIT(TRANSCODER_A) |
+			       BIT(TRANSCODER_B) |
+			       BIT(TRANSCODER_C);
 
-	if (INTEL_GEN(dev_priv) >= 8) {
-		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
-			EDP_PSR_ERROR(TRANSCODER_B) |
-			EDP_PSR_ERROR(TRANSCODER_C);
+	debug_mask = 0;
+	mask = 0;
+	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+		int shift = edp_psr_shift(cpu_transcoder);
 
-		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
-			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
-			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
-			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+		mask |= EDP_PSR_ERROR(shift);
+		debug_mask |= EDP_PSR_POST_EXIT(shift) |
+			      EDP_PSR_PRE_ENTRY(shift);
 	}
 
 	if (debug & I915_PSR_DEBUG_IRQ)
@@ -148,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	u32 transcoders = BIT(TRANSCODER_EDP);
 	enum transcoder cpu_transcoder;
 	ktime_t time_ns =  ktime_get();
+	u32 mask = 0;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		transcoders |= BIT(TRANSCODER_A) |
@@ -155,18 +181,32 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			       BIT(TRANSCODER_C);
 
 	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
-		/* FIXME: Exit PSR and link train manually when this happens. */
-		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
-			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
-				      transcoder_name(cpu_transcoder));
+		int shift = edp_psr_shift(cpu_transcoder);
 
-		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+		if (psr_iir & EDP_PSR_ERROR(shift)) {
+			DRM_WARN("[transcoder %s] PSR aux error\n",
+				 transcoder_name(cpu_transcoder));
+
+			dev_priv->psr.irq_aux_error = true;
+
+			/*
+			 * If this interrupt is not masked it will keep
+			 * firing so fast that it prevents the scheduled
+			 * work from running.
+			 * Also, after a PSR error we don't want to arm PSR
+			 * again, so we don't care about unmasking the
+			 * interrupt or unsetting irq_aux_error.
+			 */
+			mask |= EDP_PSR_ERROR(shift);
+		}
+
+		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
 			dev_priv->psr.last_entry_attempt = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
 				      transcoder_name(cpu_transcoder));
 		}
 
-		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
 			dev_priv->psr.last_exit = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
 				      transcoder_name(cpu_transcoder));
@@ -180,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			}
 		}
 	}
+
+	if (mask) {
+		mask |= I915_READ(EDP_PSR_IMR);
+		I915_WRITE(EDP_PSR_IMR, mask);
+
+		schedule_work(&dev_priv->psr.work);
+	}
 }
 
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -294,7 +341,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
 		psr_vsc.sdp_header.HB3 = 0x8;
 	}
 
-	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+	intel_dig_port->write_infoframe(&intel_dig_port->base,
+					crtc_state,
 					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
@@ -458,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	if (!dev_priv->psr.sink_psr2_support)
 		return false;
 
+	/*
+	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
+	 * resolution requires DSC to be enabled, priority is given to DSC
+	 * over PSR2.
+	 */
+	if (crtc_state->dsc_params.compression_enable) {
+		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+		return false;
+	}
+
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 		psr_max_h = 4096;
 		psr_max_v = 2304;
@@ -503,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 		return;
 	}
 
-	if (IS_HASWELL(dev_priv) &&
-	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
-		      S3D_ENABLE) {
-		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+	if (dev_priv->psr.sink_not_reliable) {
+		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
 		return;
 	}
 
@@ -553,11 +609,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	dev_priv->psr.active = true;
 }
 
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+					 enum transcoder cpu_transcoder)
+{
+	static const i915_reg_t regs[] = {
+		[TRANSCODER_A] = CHICKEN_TRANS_A,
+		[TRANSCODER_B] = CHICKEN_TRANS_B,
+		[TRANSCODER_C] = CHICKEN_TRANS_C,
+		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+	};
+
+	WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+		    !regs[cpu_transcoder].reg))
+		cpu_transcoder = TRANSCODER_A;
+
+	return regs[cpu_transcoder];
+}
+
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
 				    const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 mask;
 
 	/* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
 	 * use hardcoded values for PSR AUX transactions
@@ -566,37 +642,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 		hsw_psr_setup_aux(intel_dp);
 
 	if (dev_priv->psr.psr2_enabled) {
-		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+							cpu_transcoder);
+		u32 chicken = I915_READ(reg);
 
-		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+		if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
 			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
 				   | PSR2_ADD_VERTICAL_LINE_COUNT);
 
 		else
 			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
-		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
-	} else {
-		/*
-		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
-		 * and HPD. also mask LPSP to avoid dependency on other
-		 * drivers that might block runtime_pm besides
-		 * preventing  other hw tracking issues now we can rely
-		 * on frontbuffer tracking.
-		 */
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+		I915_WRITE(reg, chicken);
 	}
+
+	/*
+	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
+	 * mask LPSP to avoid a dependency on other drivers that might block
+	 * runtime_pm, besides preventing other hw tracking issues, now that
+	 * we can rely on frontbuffer tracking.
+	 */
+	mask = EDP_PSR_DEBUG_MASK_MEMUP |
+	       EDP_PSR_DEBUG_MASK_HPD |
+	       EDP_PSR_DEBUG_MASK_LPSP |
+	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+	I915_WRITE(EDP_PSR_DEBUG, mask);
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -646,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 	dev_priv->psr.prepared = true;
+	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
 	if (psr_global_enabled(dev_priv->psr.debug))
 		intel_psr_enable_locked(dev_priv, crtc_state);
@@ -656,49 +730,34 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 val;
 
-	if (dev_priv->psr.active) {
-		i915_reg_t psr_status;
-		u32 psr_status_mask;
-
-		if (dev_priv->psr.psr2_enabled) {
-			psr_status = EDP_PSR2_STATUS;
-			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR2_CTL,
-				   I915_READ(EDP_PSR2_CTL) &
-				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
-		} else {
-			psr_status = EDP_PSR_STATUS;
-			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR_CTL,
-				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
-		}
-
-		/* Wait till PSR is idle */
-		if (intel_wait_for_register(dev_priv,
-					    psr_status, psr_status_mask, 0,
-					    2000))
-			DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
-		dev_priv->psr.active = false;
-	} else {
-		if (dev_priv->psr.psr2_enabled)
+	if (!dev_priv->psr.active) {
+		if (INTEL_GEN(dev_priv) >= 9)
 			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-		else
-			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		return;
 	}
+
+	if (dev_priv->psr.psr2_enabled) {
+		val = I915_READ(EDP_PSR2_CTL);
+		WARN_ON(!(val & EDP_PSR2_ENABLE));
+		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+	} else {
+		val = I915_READ(EDP_PSR_CTL);
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+	}
+	dev_priv->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	i915_reg_t psr_status;
+	u32 psr_status_mask;
 
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -707,7 +766,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Disabling PSR%s\n",
 		      dev_priv->psr.psr2_enabled ? "2" : "1");
-	intel_psr_disable_source(intel_dp);
+
+	intel_psr_exit(dev_priv);
+
+	if (dev_priv->psr.psr2_enabled) {
+		psr_status = EDP_PSR2_STATUS;
+		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+	} else {
+		psr_status = EDP_PSR_STATUS;
+		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+	}
+
+	/* Wait till PSR is idle */
+	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+				    2000))
+		DRM_ERROR("Timed out waiting PSR idle state\n");
 
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -893,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+	struct i915_psr *psr = &dev_priv->psr;
+
+	intel_psr_disable_locked(psr->dp);
+	psr->sink_not_reliable = true;
+	/* let's make sure that the sink is awake */
+	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -903,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
 	if (!dev_priv->psr.enabled)
 		goto unlock;
 
+	if (READ_ONCE(dev_priv->psr.irq_aux_error))
+		intel_psr_handle_irq(dev_priv);
+
 	/*
 	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it keeps disabled until next full enable/disable cycle.
@@ -925,25 +1011,6 @@ static void intel_psr_work(struct work_struct *work)
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	if (!dev_priv->psr.active)
-		return;
-
-	if (dev_priv->psr.psr2_enabled) {
-		val = I915_READ(EDP_PSR2_CTL);
-		WARN_ON(!(val & EDP_PSR2_ENABLE));
-		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-	} else {
-		val = I915_READ(EDP_PSR_CTL);
-		WARN_ON(!(val & EDP_PSR_ENABLE));
-		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-	}
-	dev_priv->psr.active = false;
-}
-
 /**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
@@ -960,9 +1027,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
 	if (!CAN_PSR(dev_priv))
 		return;
 
@@ -975,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
 	if (frontbuffer_bits)
@@ -1003,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
 	if (!CAN_PSR(dev_priv))
 		return;
 
@@ -1018,28 +1076,21 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits) {
-		if (dev_priv->psr.psr2_enabled) {
-			intel_psr_exit(dev_priv);
-		} else {
-			/*
-			 * Display WA #0884: all
-			 * This documented WA for bxt can be safely applied
-			 * broadly so we can force HW tracking to exit PSR
-			 * instead of disabling and re-enabling.
-			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
-			 * but it makes more sense write to the current active
-			 * pipe.
-			 */
-			I915_WRITE(CURSURFLIVE(pipe), 0);
-		}
+		/*
+		 * Display WA #0884: all
+		 * This documented WA for bxt can be safely applied
+		 * broadly so we can force HW tracking to exit PSR
+		 * instead of disabling and re-enabling.
+		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+	 * but it makes more sense to write to the current active
+		 * pipe.
+		 */
+		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1056,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
  */
 void intel_psr_init(struct drm_i915_private *dev_priv)
 {
+	u32 val;
+
 	if (!HAS_PSR(dev_priv))
 		return;
 
@@ -1065,11 +1118,24 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->psr.sink_support)
 		return;
 
-	if (i915_modparams.enable_psr == -1) {
-		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+	if (i915_modparams.enable_psr == -1)
+		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+			i915_modparams.enable_psr = 0;
 
-		/* Per platform default: all disabled. */
-		i915_modparams.enable_psr = 0;
+	/*
+	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+	 * will still keep the error set even after the reset done in the
+	 * irq_preinstall and irq_uninstall hooks.
+	 * Enabling PSR in this situation causes the screen to freeze the
+	 * first time the PSR HW tries to activate, so keep PSR disabled
+	 * to avoid any rendering problems.
+	 */
+	val = I915_READ(EDP_PSR_IIR);
+	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+	if (val) {
+		DRM_DEBUG_KMS("PSR interruption error set\n");
+		dev_priv->psr.sink_not_reliable = true;
+		return;
 	}
 
 	/* Set link_standby x link_off defaults */
@@ -1109,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
 		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
 		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
 	}
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1126,12 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	if (val & ~errors)
 		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
 			  val & ~errors);
-	if (val & errors)
+	if (val & errors) {
 		intel_psr_disable_locked(intel_dp);
+		psr->sink_not_reliable = true;
+	}
 	/* clear status register */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
-	/* TODO: handle PSR2 errors */
 exit:
 	mutex_unlock(&psr->lock);
 }
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	bool ret;
+
+	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+		return false;
+
+	mutex_lock(&dev_priv->psr.lock);
+	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+	mutex_unlock(&dev_priv->psr.lock);
+
+	return ret;
+}
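
An aside on the pattern above: the AUX error is recorded from the interrupt path with a single store to irq_aux_error, and intel_psr_work() consumes it later under the PSR mutex. Below is a minimal user-space sketch of that hand-off, using pthreads and C11 atomics in place of the kernel primitives; all names are illustrative stand-ins, not the driver's code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct psr_state {
	pthread_mutex_t lock;
	atomic_bool irq_aux_error;	/* written from "IRQ" context */
	bool enabled;
	bool sink_not_reliable;
};

/* stand-in for intel_psr_handle_irq(): disable and mark the sink bad */
static void handle_irq_error(struct psr_state *psr)
{
	psr->enabled = false;
	psr->sink_not_reliable = true;
}

/* stand-in for the worker: consume the flag under the PSR lock */
static void psr_work(struct psr_state *psr)
{
	pthread_mutex_lock(&psr->lock);
	if (psr->enabled && atomic_exchange(&psr->irq_aux_error, false))
		handle_irq_error(psr);
	pthread_mutex_unlock(&psr->lock);
}

int main(void)
{
	struct psr_state psr = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.enabled = true,
	};

	atomic_store(&psr.irq_aux_error, true);	/* the "interrupt" fires */
	psr_work(&psr);

	printf("enabled=%d sink_not_reliable=%d\n",
	       psr.enabled, psr.sink_not_reliable);
	return 0;
}

The interrupt side stays lock-free; only the worker, which already holds the mutex for the rest of the PSR state, does the disable.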
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 0000000..ec2b0fc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+	DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+	DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+	DRM_INFO("applying backlight present quirk\n");
+}
+
+/* Toshiba Satellite P50-C-18C requires a T12 delay of at least 800 ms,
+ * which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+	DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+	void (*hook)(struct drm_i915_private *i915);
+	const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+	return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+	{
+		.dmi_id_list = &(const struct dmi_system_id[]) {
+			{
+				.callback = intel_dmi_reverse_brightness,
+				.ident = "NCR Corporation",
+				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
+				},
+			},
+			{ }  /* terminating entry */
+		},
+		.hook = quirk_invert_brightness,
+	},
+};
+
+static struct intel_quirk intel_quirks[] = {
+	/* Lenovo U160 cannot use SSC on LVDS */
+	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+	/* Sony Vaio Y cannot use SSC on LVDS */
+	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Acer Aspire 5336 */
+	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Acer C720 Chromebook (Core i3 4005U) */
+	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Apple Macbook 2,1 (Core 2 T7400) */
+	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+	/* Toshiba CB35 Chromebook (Celeron 2955U) */
+	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+	/* HP Chromebook 14 (Celeron 2955U) */
+	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+	/* Dell Chromebook 11 */
+	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+	/* Dell Chromebook 11 (2015 version) */
+	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+	/* Toshiba Satellite P50-C-18C */
+	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+	/* GeminiLake NUC */
+	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	/* ASRock ITX */
+	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+	struct pci_dev *d = i915->drm.pdev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+		struct intel_quirk *q = &intel_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID))
+			q->hook(i915);
+	}
+	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+			intel_dmi_quirks[i].hook(i915);
+	}
+}
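
The quirk machinery above is a plain table scan: an exact device match plus wildcard-capable subsystem IDs. A standalone sketch of the same matching logic follows, with ANY_ID standing in for PCI_ANY_ID and made-up table entries:

#include <stdio.h>

#define ANY_ID 0xffff	/* stand-in for PCI_ANY_ID */

struct quirk {
	unsigned short device, sub_vendor, sub_device;
	const char *name;
};

static const struct quirk quirks[] = {
	{ 0x0046, 0x17aa, 0x3920, "ssc-force-disable" },
	{ 0x2a42, 0x1025, ANY_ID, "invert-brightness" },
};

static int id_matches(unsigned short have, unsigned short want)
{
	return want == ANY_ID || have == want;
}

static void apply_quirks(unsigned short device,
			 unsigned short sub_vendor,
			 unsigned short sub_device)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (device == q->device &&
		    id_matches(sub_vendor, q->sub_vendor) &&
		    id_matches(sub_device, q->sub_device))
			printf("applying %s quirk\n", q->name);
	}
}

int main(void)
{
	apply_quirks(0x2a42, 0x1025, 0x0459);	/* hits the wildcard row */
	return 0;
}

Every matching row fires, so a machine can accumulate several quirks in one pass, exactly as intel_init_quirks() does.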
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1f8d2a6..fbeaec3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -533,6 +533,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
 	intel_engine_reset_breadcrumbs(engine);
 
+	if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+		if (HAS_VEBOX(dev_priv))
+			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+	}
+
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
 
@@ -550,10 +557,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	/* Check that the ring offsets point within the ring! */
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
 	intel_ring_update_space(ring);
+
+	/* First wake the ring up as an empty/idle ring */
 	I915_WRITE_HEAD(engine, ring->head);
-	I915_WRITE_TAIL(engine, ring->tail);
+	I915_WRITE_TAIL(engine, ring->head);
 	(void)I915_READ_TAIL(engine);
 
 	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -578,6 +586,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) > 2)
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 
+	/* Now awake, let it get started */
+	if (ring->tail != ring->head) {
+		I915_WRITE_TAIL(engine, ring->tail);
+		(void)I915_READ_TAIL(engine);
+	}
+
 	/* Papering over lost _interrupts_ immediately following the restart */
 	intel_engine_wakeup(engine);
 out:
@@ -612,7 +626,9 @@ static void skip_request(struct i915_request *rq)
 
 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
 {
-	GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+	GEM_TRACE("%s request global=%d, current=%d\n",
+		  engine->name, rq ? rq->global_seqno : 0,
+		  intel_engine_get_seqno(engine));
 
 	/*
 	 * Try to restore the logical GPU state to match the continuation
@@ -644,7 +660,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ctx_workarounds_emit(rq);
+	ret = intel_engine_emit_ctx_wa(rq);
 	if (ret != 0)
 		return ret;
 
@@ -662,8 +678,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_whitelist_workarounds_apply(engine);
-
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (IS_GEN(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -745,9 +759,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
-		if (!i915_request_completed(request))
-			dma_fence_set_error(&request->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			     &request->fence.flags))
+			continue;
+
+		dma_fence_set_error(&request->fence, -EIO);
 	}
+
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -1061,8 +1084,7 @@ i915_emit_bb_start(struct i915_request *rq,
 int intel_ring_pin(struct intel_ring *ring)
 {
 	struct i915_vma *vma = ring->vma;
-	enum i915_map_type map =
-		HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
 	unsigned int flags;
 	void *addr;
 	int ret;
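
The restart change above is worth spelling out: the ring is first woken as empty by writing HEAD's value to TAIL, and only after the engine is running is the real TAIL exposed, so the hardware never fetches stale commands mid-initialization. A toy model of that ordering, with plain variables standing in for the MMIO registers:

#include <stdio.h>

static unsigned int reg_head, reg_tail;

static void write_head(unsigned int v) { reg_head = v; }
static void write_tail(unsigned int v) { reg_tail = v; }

static void ring_restart(unsigned int head, unsigned int tail)
{
	/*
	 * Step 1: wake the ring as empty (TAIL == HEAD) so the HW does
	 * not start fetching stale commands mid-initialization.
	 */
	write_head(head);
	write_tail(head);

	/* ... RING_CTL would be programmed and STOP_RING cleared here ... */

	/* Step 2: only once the ring is awake, expose the real work. */
	if (tail != head)
		write_tail(tail);
}

int main(void)
{
	ring_restart(0x40, 0x100);
	printf("HEAD=%#x TAIL=%#x\n", reg_head, reg_tail);
	return 0;
}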
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 767a719..72edaa7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
@@ -94,11 +94,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 #define I915_MAX_SUBSLICES 8
 
 #define instdone_slice_mask(dev_priv__) \
-	(INTEL_GEN(dev_priv__) == 7 ? \
+	(IS_GEN7(dev_priv__) ? \
 	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
 
 #define instdone_subslice_mask(dev_priv__) \
-	(INTEL_GEN(dev_priv__) == 7 ? \
+	(IS_GEN7(dev_priv__) ? \
 	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
 
 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -191,11 +191,22 @@ enum intel_engine_id {
 };
 
 struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
 	struct rb_node node;
-	struct list_head requests;
+	unsigned long used;
 	int priority;
 };
 
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)
+
 struct st_preempt_hang {
 	struct completion completion;
 	bool inject_hang;
@@ -303,13 +314,6 @@ struct intel_engine_execlists {
 	struct rb_root_cached queue;
 
 	/**
-	 * @csb_read: control register for Context Switch buffer
-	 *
-	 * Note this register is always in mmio.
-	 */
-	u32 __iomem *csb_read;
-
-	/**
 	 * @csb_write: control register for Context Switch buffer
 	 *
 	 * Note this register may be either mmio or HWSP shadow.
@@ -329,15 +333,6 @@ struct intel_engine_execlists {
 	u32 preempt_complete_status;
 
 	/**
-	 * @csb_write_reset: reset value for CSB write pointer
-	 *
-	 * As the CSB write pointer maybe either in HWSP or as a field
-	 * inside an mmio register, we want to reprogram it slightly
-	 * differently to avoid later confusion.
-	 */
-	u32 csb_write_reset;
-
-	/**
 	 * @csb_head: context status buffer head
 	 */
 	u8 csb_head;
@@ -441,7 +436,9 @@ struct intel_engine_cs {
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
+	struct i915_wa_list ctx_wa_list;
 	struct i915_wa_list wa_list;
+	struct i915_wa_list whitelist;
 
 	u32             irq_keep_mask; /* always keep these interrupts */
 	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -488,11 +485,10 @@ struct intel_engine_cs {
 	 */
 	void		(*submit_request)(struct i915_request *rq);
 
-	/* Call when the priority on a request has changed and it and its
+	/*
+	 * Call when the priority on a request has changed and it and its
 	 * dependencies may need rescheduling. Note the request itself may
 	 * not be ready to run!
-	 *
-	 * Called under the struct_mutex.
 	 */
 	void		(*schedule)(struct i915_request *request,
 				    const struct i915_sched_attr *attr);
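
The reworked i915_priolist above keeps one request list per priority slot plus a used bitmask, and priolist_for_each_request_consume() drains slots in ffs() order, clearing each bit as it goes. A small user-space sketch of that bitmask walk, with one string per slot for brevity:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	const char *slot[8] = { NULL };
	unsigned int used = 0;
	int idx;

	/* queue "requests" in two priority slots */
	slot[1] = "request A"; used |= 1u << 1;
	slot[5] = "request B"; used |= 1u << 5;

	/*
	 * Drain in ffs() order, clearing each slot's bit as we go,
	 * mirroring priolist_for_each_request_consume().
	 */
	while ((idx = ffs(used))) {
		used &= ~(1u << (idx - 1));
		printf("slot %d: %s\n", idx - 1, slot[idx - 1]);
	}
	return 0;
}

The bitmask makes "find the next non-empty priority list" a single ffs() instead of a tree or list walk per dequeue.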
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 44e4491..4350a52 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 		return "TRANSCODER_C";
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+		return "TRANSCODER_EDP_VDSC";
 	case POWER_DOMAIN_TRANSCODER_DSI_A:
 		return "TRANSCODER_DSI_A";
 	case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -208,7 +210,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 
 	is_enabled = true;
 
-	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
 		if (power_well->desc->always_on)
 			continue;
 
@@ -436,6 +438,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
 
 	hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+	/* Display WA #1178: icl */
+	if (IS_ICELAKE(dev_priv) &&
+	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+	    !intel_bios_is_port_edp(dev_priv, port)) {
+		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+	}
 }
 
 static void
@@ -456,6 +467,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 	hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
+#define ICL_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+				 struct i915_power_well *power_well)
+{
+	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+	u32 val;
+
+	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+	val &= ~DP_AUX_CH_CTL_TBT_IO;
+	if (power_well->desc->hsw.is_tc_tbt)
+		val |= DP_AUX_CH_CTL_TBT_IO;
+	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+	hsw_power_well_enable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -465,11 +495,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+	enum i915_power_well_id id = power_well->desc->id;
 	int pw_idx = power_well->desc->hsw.idx;
 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
+	u32 val;
 
-	return (I915_READ(regs->driver) & mask) == mask;
+	val = I915_READ(regs->driver);
+
+	/*
+	 * On GEN9 big core, due to a DMC bug, the driver's request bits for
+	 * PW1 and the MISC_IO PW will not be restored, so check instead for the
+	 * BIOS's own request bits, which are forced-on for these power wells
+	 * when exiting DC5/6.
+	 */
+	if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+		val |= I915_READ(regs->bios);
+
+	return (val & mask) == mask;
 }
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
@@ -551,7 +595,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 	u32 mask;
 
 	mask = DC_STATE_EN_UPTO_DC5;
-	if (IS_GEN9_LP(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
+		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+	else if (IS_GEN9_LP(dev_priv))
 		mask |= DC_STATE_EN_DC9;
 	else
 		mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +670,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 	assert_can_enable_dc9(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC9\n");
-
-	intel_power_sequencer_reset(dev_priv);
+	/*
+	 * Power sequencer reset is not needed on
+	 * platforms with South Display Engine on PCH,
+	 * because PPS registers are always on.
+	 */
+	if (!HAS_PCH_SPLIT(dev_priv))
+		intel_power_sequencer_reset(dev_priv);
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -707,7 +758,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 	assert_csr_loaded(dev_priv);
 }
 
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
 
@@ -808,6 +859,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 
 	if (IS_GEN9_LP(dev_priv))
 		bxt_verify_ddi_phy_power_wells(dev_priv);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		/*
+		 * DMC retains HW context only for port A; the combo PHY's
+		 * HW context for port B is lost after DC transitions,
+		 * so we need to restore it manually.
+		 */
+		icl_combo_phys_init(dev_priv);
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1667,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	     intel_display_power_domain_str(domain));
 	power_domains->domain_use_count[domain]--;
 
-	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
 		intel_power_well_put(dev_priv, power_well);
 
 	mutex_unlock(&power_domains->lock);
@@ -1971,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	 */
 #define ICL_PW_2_POWER_DOMAINS (			\
 	ICL_PW_3_POWER_DOMAINS |			\
+	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
 	BIT_ULL(POWER_DOMAIN_INIT))
 	/*
-	 * - eDP/DSI VDSC
 	 * - KVMR (HW control)
 	 */
 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
@@ -2041,7 +2100,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2058,7 +2117,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
 static const struct i915_power_well_desc i830_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2102,7 +2161,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
 static const struct i915_power_well_desc hsw_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2123,7 +2182,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
 static const struct i915_power_well_desc bdw_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2166,7 +2225,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 static const struct i915_power_well_desc vlv_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2242,7 +2301,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
 static const struct i915_power_well_desc chv_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2293,7 +2352,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2301,6 +2360,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2313,6 +2373,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "MISC IO power well",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2446,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 static const struct i915_power_well_desc bxt_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 	},
 	{
 		.name = "power well 1",
+		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2443,7 +2506,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
 static const struct i915_power_well_desc glk_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2451,6 +2514,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2571,7 +2635,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
 static const struct i915_power_well_desc cnl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2579,6 +2643,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2716,6 +2781,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
 	.is_enabled = hsw_power_well_enabled,
 };
 
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+	.sync_hw = hsw_power_well_sync_hw,
+	.enable = icl_tc_phy_aux_power_well_enable,
+	.disable = hsw_power_well_disable,
+	.is_enabled = hsw_power_well_enabled,
+};
+
 static const struct i915_power_well_regs icl_aux_power_well_regs = {
 	.bios	= ICL_PWR_WELL_CTL_AUX1,
 	.driver	= ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2803,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
 static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2739,6 +2811,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2861,81 +2934,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX C",
 		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX D",
 		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX E",
 		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX F",
 		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX TBT1",
 		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT2",
 		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT3",
 		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT4",
 		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
@@ -2969,17 +3050,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 	int requested_dc;
 	int max_dc;
 
-	if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		max_dc = 2;
-		mask = 0;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		max_dc = 1;
 		/*
 		 * DC9 has a separate HW flow from the rest of the DC states,
 		 * not depending on the DMC firmware. It's needed by system
 		 * suspend/resume, so allow it unconditionally.
 		 */
 		mask = DC_STATE_EN_DC9;
+	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+		max_dc = 2;
+		mask = 0;
+	} else if (IS_GEN9_LP(dev_priv)) {
+		max_dc = 1;
+		mask = DC_STATE_EN_DC9;
 	} else {
 		max_dc = 0;
 		mask = 0;
@@ -3075,12 +3159,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	 */
 	if (IS_ICELAKE(dev_priv)) {
 		err = set_power_wells(power_domains, icl_power_wells);
-	} else if (IS_HASWELL(dev_priv)) {
-		err = set_power_wells(power_domains, hsw_power_wells);
-	} else if (IS_BROADWELL(dev_priv)) {
-		err = set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_GEN9_BC(dev_priv)) {
-		err = set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		err = set_power_wells(power_domains, cnl_power_wells);
 
@@ -3092,13 +3170,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 		 */
 		if (!IS_CNL_WITH_PORT_F(dev_priv))
 			power_domains->power_well_count -= 2;
-
-	} else if (IS_BROXTON(dev_priv)) {
-		err = set_power_wells(power_domains, bxt_power_wells);
 	} else if (IS_GEMINILAKE(dev_priv)) {
 		err = set_power_wells(power_domains, glk_power_wells);
+	} else if (IS_BROXTON(dev_priv)) {
+		err = set_power_wells(power_domains, bxt_power_wells);
+	} else if (IS_GEN9_BC(dev_priv)) {
+		err = set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_CHERRYVIEW(dev_priv)) {
 		err = set_power_wells(power_domains, chv_power_wells);
+	} else if (IS_BROADWELL(dev_priv)) {
+		err = set_power_wells(power_domains, bdw_power_wells);
+	} else if (IS_HASWELL(dev_priv)) {
+		err = set_power_wells(power_domains, hsw_power_wells);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		err = set_power_wells(power_domains, vlv_power_wells);
 	} else if (IS_I830(dev_priv)) {
@@ -3238,18 +3321,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
 	I915_WRITE(MBUS_ABOX_CTL, val);
 }
 
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+				      bool enable)
+{
+	i915_reg_t reg;
+	u32 reset_bits, val;
+
+	if (IS_IVYBRIDGE(dev_priv)) {
+		reg = GEN7_MSG_CTL;
+		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+	} else {
+		reg = HSW_NDE_RSTWRN_OPT;
+		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+	}
+
+	val = I915_READ(reg);
+
+	if (enable)
+		val |= reset_bits;
+	else
+		val &= ~reset_bits;
+
+	I915_WRITE(reg, val);
+}
+
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
 				   bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	uint32_t val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* enable PCH reset handshake */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
 	/* enable PG1 and Misc I/O */
 	mutex_lock(&power_domains->lock);
@@ -3305,7 +3410,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	uint32_t val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3315,9 +3419,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 	 * Move the handshake programming to initialization sequence.
 	 * Previously was left up to BIOS.
 	 */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+	intel_pch_reset_handshake(dev_priv, false);
 
 	/* Enable PG1 */
 	mutex_lock(&power_domains->lock);
@@ -3363,101 +3465,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 	usleep_range(10, 30);		/* 10 us delay per Bspec */
 }
 
-enum {
-	PROCMON_0_85V_DOT_0,
-	PROCMON_0_95V_DOT_0,
-	PROCMON_0_95V_DOT_1,
-	PROCMON_1_05V_DOT_0,
-	PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
-	u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
-	[PROCMON_0_85V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
-	[PROCMON_0_95V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
-	[PROCMON_0_95V_DOT_1] =
-		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
-	[PROCMON_1_05V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
-	[PROCMON_1_05V_DOT_1] =
-		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
-				       enum port port)
-{
-	const struct cnl_procmon *procmon;
-	u32 val;
-
-	val = I915_READ(ICL_PORT_COMP_DW3(port));
-	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
-	default:
-		MISSING_CASE(val);
-		/* fall through */
-	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
-		break;
-	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
-		break;
-	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
-		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
-		break;
-	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
-		break;
-	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
-		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
-		break;
-	}
-
-	val = I915_READ(ICL_PORT_COMP_DW1(port));
-	val &= ~((0xff << 16) | 0xff);
-	val |= procmon->dw1;
-	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
-	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
-	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* 1. Enable PCH Reset Handshake */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val |= RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
-	/* 2. Enable Comp */
-	val = I915_READ(CHICKEN_MISC_2);
-	val &= ~CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
-
-	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
-	cnl_set_procmon_ref_values(dev_priv, PORT_A);
-
-	val = I915_READ(CNL_PORT_COMP_DW0);
-	val |= COMP_INIT;
-	I915_WRITE(CNL_PORT_COMP_DW0, val);
-
-	/* 3. */
-	val = I915_READ(CNL_PORT_CL1CM_DW5);
-	val |= CL_POWER_DOWN_ENABLE;
-	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+	/* 2-3. */
+	cnl_combo_phys_init(dev_priv);
 
 	/*
 	 * 4. Enable Power Well 1 (PG1).
@@ -3482,7 +3501,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3506,44 +3524,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 
 	usleep_range(10, 30);		/* 10 us delay per Bspec */
 
-	/* 5. Disable Comp */
-	val = I915_READ(CHICKEN_MISC_2);
-	val |= CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
+	/* 5. */
+	cnl_combo_phys_uninit(dev_priv);
 }
 
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
-				  bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+			   bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	enum port port;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* 1. Enable PCH reset handshake. */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val |= RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
-	for (port = PORT_A; port <= PORT_B; port++) {
-		/* 2. Enable DDI combo PHY comp. */
-		val = I915_READ(ICL_PHY_MISC(port));
-		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(port), val);
-
-		cnl_set_procmon_ref_values(dev_priv, port);
-
-		val = I915_READ(ICL_PORT_COMP_DW0(port));
-		val |= COMP_INIT;
-		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
-		/* 3. Set power down enable. */
-		val = I915_READ(ICL_PORT_CL_DW5(port));
-		val |= CL_POWER_DOWN_ENABLE;
-		I915_WRITE(ICL_PORT_CL_DW5(port), val);
-	}
+	/* 2-3. */
+	icl_combo_phys_init(dev_priv);
 
 	/*
 	 * 4. Enable Power Well 1 (PG1).
@@ -3567,12 +3564,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 		intel_csr_load_program(dev_priv);
 }
 
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	enum port port;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3594,12 +3589,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 	intel_power_well_disable(dev_priv, well);
 	mutex_unlock(&power_domains->lock);
 
-	/* 5. Disable Comp */
-	for (port = PORT_A; port <= PORT_B; port++) {
-		val = I915_READ(ICL_PHY_MISC(port));
-		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(port), val);
-	}
+	/* 5. */
+	icl_combo_phys_uninit(dev_priv);
 }
 
 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3757,7 +3748,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 		mutex_lock(&power_domains->lock);
 		vlv_cmnlane_wa(dev_priv);
 		mutex_unlock(&power_domains->lock);
-	}
+	} else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+		intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
 	/*
 	 * Keep all power wells enabled for any dependent HW access during
@@ -3951,14 +3943,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 		int domains_count;
 		bool enabled;
 
-		/*
-		 * Power wells not belonging to any domain (like the MISC_IO
-		 * and PW1 power wells) are under FW control, so ignore them,
-		 * since their state can change asynchronously.
-		 */
-		if (!power_well->desc->domains)
-			continue;
-
 		enabled = power_well->desc->ops->is_enabled(dev_priv,
 							    power_well);
 		if ((power_well->count || power_well->desc->always_on) !=
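
A sketch of the gen9 workaround in hsw_power_well_enabled() above: the DMC can drop the driver's request bit for PW1/MISC_IO while the well stays powered, so the BIOS register's forced-on request is ORed in before the REQ|STATE mask check. The bit layout below is illustrative, not the real register map:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTL_REQ(idx)	(UINT32_C(1) << ((idx) * 2 + 1))
#define CTL_STATE(idx)	(UINT32_C(1) << ((idx) * 2))

static bool power_well_enabled(uint32_t driver_reg, uint32_t bios_reg,
			       int idx, bool merge_bios)
{
	uint32_t mask = CTL_REQ(idx) | CTL_STATE(idx);
	uint32_t val = driver_reg;

	/* gen9 WA: trust the BIOS's forced-on request bit too */
	if (merge_bios)
		val |= bios_reg;

	return (val & mask) == mask;
}

int main(void)
{
	/* state bit up, driver request lost by the DMC, BIOS request set */
	uint32_t drv = CTL_STATE(1);
	uint32_t bios = CTL_REQ(1);

	printf("without WA: %d\n", power_well_enabled(drv, bios, 1, false));
	printf("with WA:    %d\n", power_well_enabled(drv, bios, 1, true));
	return 0;
}

Without merging the BIOS register the well would be reported as off even though its state bit is set, which is exactly the false negative the workaround avoids.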
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e..5805ec1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
 	bool has_hdmi_audio;
 	bool rgb_quant_range_selectable;
 
-	/**
-	 * This is sdvo fixed pannel mode pointer
-	 */
-	struct drm_display_mode *sdvo_lvds_fixed_mode;
-
 	/* DDC bus used by this SDVO encoder */
 	uint8_t ddc_bus;
 
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 	args.height = height;
 	args.interlace = 0;
 
-	if (IS_LVDS(intel_sdvo_connector) &&
-	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
-	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
-		args.scaled = 1;
+	if (IS_LVDS(intel_sdvo_connector)) {
+		const struct drm_display_mode *fixed_mode =
+			intel_sdvo_connector->base.panel.fixed_mode;
+
+		if (fixed_mode->hdisplay != width ||
+		    fixed_mode->vdisplay != height)
+			args.scaled = 1;
+	}
 
 	return intel_sdvo_set_value(intel_sdvo,
 				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
 	pipe_config->pipe_bpp = 8*3;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
 		pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 		pipe_config->sdvo_tv_clock = true;
 	} else if (IS_LVDS(intel_sdvo_connector)) {
 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
-							     intel_sdvo->sdvo_lvds_fixed_mode))
+							     intel_sdvo_connector->base.panel.fixed_mode))
 			return false;
 
 		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 	/* lvds has a special fixed output timing. */
 	if (IS_LVDS(intel_sdvo_connector))
 		intel_sdvo_get_dtd_from_mode(&output_dtd,
-					     intel_sdvo->sdvo_lvds_fixed_mode);
+					     intel_sdvo_connector->base.panel.fixed_mode);
 	else
 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
 	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
 		return MODE_CLOCK_HIGH;
 
 	if (IS_LVDS(intel_sdvo_connector)) {
-		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+		const struct drm_display_mode *fixed_mode =
+			intel_sdvo_connector->base.panel.fixed_mode;
+
+		if (mode->hdisplay > fixed_mode->hdisplay)
 			return MODE_PANEL;
 
-		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+		if (mode->vdisplay > fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 	return !list_empty(&connector->probed_modes);
 }
 
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
-	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
-	drm_connector_cleanup(connector);
-	kfree(intel_sdvo_connector);
-}
-
 static int
 intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
 					 const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 	.atomic_set_property = intel_sdvo_connector_atomic_set_property,
 	.late_register = intel_sdvo_connector_register,
 	.early_unregister = intel_sdvo_connector_unregister,
-	.destroy = intel_sdvo_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
 };
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
 	struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
-	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
-		drm_mode_destroy(encoder->dev,
-				 intel_sdvo->sdvo_lvds_fixed_mode);
-
 	i2c_del_adapter(&intel_sdvo->ddc);
 	intel_encoder_destroy(encoder);
 }
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 	return true;
 
 err:
-	intel_sdvo_destroy(connector);
+	intel_connector_destroy(connector);
 	return false;
 }
 
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
 	list_for_each_entry(mode, &connector->probed_modes, head) {
 		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
-			intel_sdvo->sdvo_lvds_fixed_mode =
+			struct drm_display_mode *fixed_mode =
 				drm_mode_duplicate(connector->dev, mode);
+
+			intel_panel_init(&intel_connector->panel,
+					 fixed_mode, NULL);
 			break;
 		}
 	}
 
-	if (!intel_sdvo->sdvo_lvds_fixed_mode)
+	if (!intel_connector->panel.fixed_mode)
 		goto err;
 
 	return true;
 
 err:
-	intel_sdvo_destroy(connector);
+	intel_connector_destroy(connector);
 	return false;
 }
 
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 				 &dev->mode_config.connector_list, head) {
 		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
 			drm_connector_unregister(connector);
-			intel_sdvo_destroy(connector);
+			intel_connector_destroy(connector);
 		}
 	}
 }
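
With sdvo_lvds_fixed_mode folded into the shared panel state, intel_sdvo_mode_valid() keeps the same clamp: any mode larger than the panel's fixed mode is rejected. A trimmed-down sketch of that check, using simplified stand-in types rather than the driver structs:

#include <stdio.h>

struct display_mode {
	int hdisplay, vdisplay;
};

enum mode_status { MODE_OK, MODE_PANEL };

static enum mode_status lvds_mode_valid(const struct display_mode *mode,
					const struct display_mode *fixed)
{
	/* reject anything larger than the panel's fixed timing */
	if (mode->hdisplay > fixed->hdisplay)
		return MODE_PANEL;
	if (mode->vdisplay > fixed->vdisplay)
		return MODE_PANEL;
	return MODE_OK;
}

int main(void)
{
	struct display_mode fixed = { 1366, 768 };
	struct display_mode mode = { 1920, 1080 };

	printf("%s\n", lvds_mode_valid(&mode, &fixed) == MODE_OK ?
	       "MODE_OK" : "MODE_PANEL");
	return 0;
}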
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d3090a7..d2e003d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
 
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	src->y2 = (src_y + src_h) << 16;
 
 	if (fb->format->is_yuv &&
-	    fb->format->format != DRM_FORMAT_NV12 &&
 	    (src_x & 1 || src_w & 1)) {
 		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
 			      src_x, src_w);
 		return -EINVAL;
 	}
 
+	if (fb->format->is_yuv &&
+	    fb->format->num_planes > 1 &&
+	    (src_y & 1 || src_h & 1)) {
+		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+			      src_y, src_h);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-unsigned int
+static unsigned int
 skl_plane_max_stride(struct intel_plane *plane,
 		     u32 pixel_format, u64 modifier,
 		     unsigned int rotation)
@@ -328,7 +336,8 @@ skl_program_scaler(struct intel_plane *plane,
 				      0, INT_MAX);
 
 	/* TODO: handle sub-pixel coordinates */
-	if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+	if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+	    !icl_is_hdr_plane(plane)) {
 		y_hphase = skl_scaler_calc_phase(1, hscale, false);
 		y_vphase = skl_scaler_calc_phase(1, vscale, false);
 
@@ -346,7 +355,6 @@ skl_program_scaler(struct intel_plane *plane,
 
 	I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
 		      PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
-	I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
 	I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
 		      PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
 	I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
@@ -355,70 +363,239 @@ skl_program_scaler(struct intel_plane *plane,
 	I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
 }
 
-void
-skl_update_plane(struct intel_plane *plane,
-		 const struct intel_crtc_state *crtc_state,
-		 const struct intel_plane_state *plane_state)
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI		0x1800
+#define PREOFF_YUV_TO_RGB_ME		0x1F00
+#define PREOFF_YUV_TO_RGB_LO		0x1800
+
+#define  ROFF(x)          (((x) & 0xffff) << 16)
+#define  GOFF(x)          (((x) & 0xffff) << 0)
+#define  BOFF(x)          (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc(struct intel_plane *plane,
+		      const struct intel_crtc_state *crtc_state,
+		      const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+	enum plane_id plane_id = plane->id;
+
+	static const u16 input_csc_matrix[][9] = {
+		/*
+		 * BT.601 full range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.000, 0.000, 1.371,
+		 *  1.000, -0.336, -0.698,
+		 *  1.000, 1.732, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT601] = {
+			0x7AF8, 0x7800, 0x0,
+			0x8B28, 0x7800, 0x9AC0,
+			0x0, 0x7800, 0x7DD8,
+		},
+		/*
+		 * BT.709 full range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.000, 0.000, 1.574,
+		 *  1.000, -0.187, -0.468,
+		 *  1.000, 1.855, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT709] = {
+			0x7C98, 0x7800, 0x0,
+			0x9EF8, 0x7800, 0xABF8,
+			0x0, 0x7800,  0x7ED8,
+		},
+	};
+
+	/* Matrix for Limited Range to Full Range Conversion */
+	static const u16 input_csc_matrix_lr[][9] = {
+		/*
+		 * BT.601 Limited range YCbCr -> full range RGB
+		 * The matrix required is:
+		 * [1.164384, 0.000, 1.596370,
+		 *  1.138393, -0.382500, -0.794598,
+		 *  1.138393, 1.971696, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT601] = {
+			0x7CC8, 0x7950, 0x0,
+			0x8CB8, 0x7918, 0x9C40,
+			0x0, 0x7918, 0x7FC8,
+		},
+		/*
+		 * BT.709 Limited range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.164, 0.000, 1.833671,
+		 *  1.138393, -0.213249, -0.532909,
+		 *  1.138393, 2.112402, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT709] = {
+			0x7EA8, 0x7950, 0x0,
+			0x8888, 0x7918, 0xADA8,
+			0x0, 0x7918,  0x6870,
+		},
+	};
+	const u16 *csc;
+
+	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		csc = input_csc_matrix[plane_state->base.color_encoding];
+	else
+		csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+		      GOFF(csc[1]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+		      GOFF(csc[4]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+		      GOFF(csc[7]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+		      PREOFF_YUV_TO_RGB_HI);
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+		      PREOFF_YUV_TO_RGB_ME);
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+		      PREOFF_YUV_TO_RGB_LO);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static void
+skl_program_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state,
+		  const struct intel_plane_state *plane_state,
+		  int color_plane, bool slave, u32 plane_ctl)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
-	u32 plane_ctl = plane_state->ctl;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	u32 surf_addr = plane_state->color_plane[0].offset;
-	u32 stride = skl_plane_stride(plane_state, 0);
+	u32 surf_addr = plane_state->color_plane[color_plane].offset;
+	u32 stride = skl_plane_stride(plane_state, color_plane);
 	u32 aux_stride = skl_plane_stride(plane_state, 1);
 	int crtc_x = plane_state->base.dst.x1;
 	int crtc_y = plane_state->base.dst.y1;
-	uint32_t x = plane_state->color_plane[0].x;
-	uint32_t y = plane_state->color_plane[0].y;
+	uint32_t x = plane_state->color_plane[color_plane].x;
+	uint32_t y = plane_state->color_plane[color_plane].y;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	struct intel_plane *linked = plane_state->linked_plane;
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	u8 alpha = plane_state->base.alpha >> 8;
 	unsigned long irqflags;
+	u32 keymsk, keymax;
 
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
 
+	keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+	keymsk = key->channel_mask & 0x3ffffff;
+	if (alpha < 0xff)
+		keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+	/* The scaler will handle the output position */
+	if (plane_state->scaler_id >= 0) {
+		crtc_x = 0;
+		crtc_y = 0;
+	}
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
+	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
+		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
+
+	if (icl_is_hdr_plane(plane)) {
+		u32 cus_ctl = 0;
+
+		if (linked) {
+			/* Enable and use MPEG-2 chroma siting */
+			cus_ctl = PLANE_CUS_ENABLE |
+				PLANE_CUS_HPHASE_0 |
+				PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+				PLANE_CUS_VPHASE_0_25;
+
+			if (linked->id == PLANE_SPRITE5)
+				cus_ctl |= PLANE_CUS_PLANE_7;
+			else if (linked->id == PLANE_SPRITE4)
+				cus_ctl |= PLANE_CUS_PLANE_6;
+			else
+				MISSING_CASE(linked->id);
+		}
+
+		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
+	}
+
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
 			      plane_state->color_ctl);
 
-	if (key->flags) {
-		I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-		I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
-		I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
-	}
+	if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+		icl_program_input_csc(plane, crtc_state, plane_state);
+
+	skl_write_plane_wm(plane, crtc_state);
+
+	I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+	I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+	I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
 
 	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
-	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
-	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
-		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
-	I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-		      (plane_state->color_plane[1].y << 16) |
-		      plane_state->color_plane[1].x);
 
-	if (plane_state->scaler_id >= 0) {
-		skl_program_scaler(plane, crtc_state, plane_state);
+	if (INTEL_GEN(dev_priv) < 11)
+		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+			      (plane_state->color_plane[1].y << 16) |
+			      plane_state->color_plane[1].x);
 
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
-	} else {
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
-	}
-
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + surf_addr);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+	if (!slave && plane_state->scaler_id >= 0)
+		skl_program_scaler(plane, crtc_state, plane_state);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+static void
+skl_update_plane(struct intel_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
+{
+	int color_plane = 0;
+
+	if (plane_state->linked_plane) {
+		/* Program the UV plane */
+		color_plane = 1;
+	}
+
+	skl_program_plane(plane, crtc_state, plane_state,
+			  color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
+{
+	skl_program_plane(plane, crtc_state, plane_state, 0, true,
+			  plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
@@ -427,15 +604,15 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+	skl_write_plane_wm(plane, crtc_state);
 
+	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-bool
+static bool
 skl_plane_get_hw_state(struct intel_plane *plane,
 		       enum pipe *pipe)
 {
@@ -628,7 +805,6 @@ vlv_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	enum plane_id plane_id = plane->id;
 	u32 sprctl = plane_state->ctl;
@@ -651,38 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	vlv_update_clrc(plane_state);
+	I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+		      plane_state->color_plane[0].stride);
+	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
 		chv_update_csc(plane_state);
 
 	if (key->flags) {
 		I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
-		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
 		I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
 	}
-	I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
-		      plane_state->color_plane[0].stride);
-	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 
-	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
-	else
-		I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+	I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+	I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
 
-	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
-	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
 	I915_WRITE_FW(SPSURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+	vlv_update_clrc(plane_state);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -692,9 +871,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
 	I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
-	POSTING_READ_FW(SPSURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -789,7 +966,6 @@ ivb_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	u32 sprctl = plane_state->ctl, sprscale = 0;
 	u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -818,37 +994,42 @@ ivb_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	if (key->flags) {
-		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
-		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
-	}
-
 	I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
 	I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
-	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
-	 * register */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
-	else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
-	else
-		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
-
 	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
 	if (IS_IVYBRIDGE(dev_priv))
 		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
+	if (key->flags) {
+		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+	}
+
+	/*
+	 * HSW consolidates SPRTILEOFF and SPRLINOFF into a single
+	 * SPROFFSET register.
+	 */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+	} else {
+		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+	}
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(SPRCTL(pipe), sprctl);
 	I915_WRITE_FW(SPRSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ_FW(SPRSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -857,12 +1038,10 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	I915_WRITE_FW(SPRCTL(pipe), 0);
-	/* Can't leave the scaler enabled... */
+	/* Disable the scaler */
 	if (IS_IVYBRIDGE(dev_priv))
 		I915_WRITE_FW(SPRSCALE(pipe), 0);
-
 	I915_WRITE_FW(SPRSURF(pipe), 0);
-	POSTING_READ_FW(SPRSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -961,7 +1140,6 @@ g4x_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	u32 dvscntr = plane_state->ctl, dvsscale = 0;
 	u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -990,32 +1168,35 @@ g4x_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	if (key->flags) {
-		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
-		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
-	}
-
 	I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
 	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
-	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
-	else
-		I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
-
 	I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
+	if (key->flags) {
+		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+	}
+
+	I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+	I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+
+	/*
+	 * The control register self-arms if the plane was previously
+	 * disabled. Try to make the plane enable atomic by writing
+	 * the control register just before the surface register.
+	 */
 	I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
 	I915_WRITE_FW(DVSSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
-	POSTING_READ_FW(DVSSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -1026,9 +1207,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	I915_WRITE_FW(DVSCNTR(pipe), 0);
 	/* Disable the scaler */
 	I915_WRITE_FW(DVSSCALE(pipe), 0);
-
 	I915_WRITE_FW(DVSSURF(pipe), 0);
-	POSTING_READ_FW(DVSSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1054,6 +1233,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
 	return ret;
 }
 
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+	if (!fb)
+		return false;
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		return false;
+	default:
+		return true;
+	}
+}
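
intel_fb_scalable() feeds the scale-limit selection in g4x_sprite_check() and skl_plane_check() below. The limits are downscale factors in 16.16 fixed point: DRM_PLANE_HELPER_NO_SCALING is 1 << 16 (1.0x, i.e. no scaling), 2 << 16 caps Ivybridge at 2x, and 16 << 16 allows up to 16x on pre-gen7 hardware. A small worked conversion, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int no_scaling = 1 << 16;	/* 1.0x: no scaling    */
	unsigned int ivb_max    = 2 << 16;	/* 2.0x max downscale  */
	unsigned int g4x_max    = 16 << 16;	/* 16.0x max downscale */

	printf("%ux %ux %ux\n",
	       no_scaling >> 16, ivb_max >> 16, g4x_max >> 16);
	return 0;
}
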
+
 static int
 g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *plane_state)
@@ -1121,18 +1313,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int max_scale, min_scale;
+	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
 
-	if (INTEL_GEN(dev_priv) < 7) {
-		min_scale = 1;
-		max_scale = 16 << 16;
-	} else if (IS_IVYBRIDGE(dev_priv)) {
-		min_scale = 1;
-		max_scale = 2 << 16;
-	} else {
-		min_scale = DRM_PLANE_HELPER_NO_SCALING;
-		max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	if (intel_fb_scalable(plane_state->base.fb)) {
+		if (INTEL_GEN(dev_priv) < 7) {
+			min_scale = 1;
+			max_scale = 16 << 16;
+		} else if (IS_IVYBRIDGE(dev_priv)) {
+			min_scale = 1;
+			max_scale = 2 << 16;
+		}
 	}
 
 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1219,6 +1411,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	unsigned int rotation = plane_state->base.rotation;
 	struct drm_format_name_buf format_name;
@@ -1247,13 +1441,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 		}
 
 		/*
-		 * 90/270 is not allowed with RGB64 16:16:16:16,
-		 * RGB 16-bit 5:6:5, and Indexed 8-bit.
-		 * TBD: Add RGB64 case once its added in supported format list.
+		 * 90/270 is not allowed with RGB64 16:16:16:16 and
+		 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11
+		 * onwards.
+		 * TBD: Add the RGB64 case once it's added to the supported
+		 * format list.
 		 */
 		switch (fb->format->format) {
-		case DRM_FORMAT_C8:
 		case DRM_FORMAT_RGB565:
+			if (INTEL_GEN(dev_priv) >= 11)
+				break;
+			/* fall through */
+		case DRM_FORMAT_C8:
 			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
 				      drm_get_format_name(fb->format->format,
 							  &format_name));
@@ -1307,12 +1505,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
 	return 0;
 }
 
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-		    struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	unsigned int rotation = plane_state->base.rotation;
+	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+	/* Display WA #1106 */
+	if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+	    (rotation == DRM_MODE_ROTATE_270 ||
+	     rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+		DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
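
Display WA #1106 forbids 90/270-degree rotated NV12 whose source width is not a multiple of 4; src_w & 3 isolates the low two bits, which are non-zero exactly for the widths that must be rejected. A worked example of the test:

#include <stdio.h>

int main(void)
{
	int widths[] = { 1920, 1922, 1080, 962 };
	int i;

	for (i = 0; i < 4; i++)
		printf("src_w=%d -> %s\n", widths[i],
		       (widths[i] & 3) ? "reject (WA #1106)" : "ok");
	return 0;
}
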
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+			   struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int max_scale, min_scale;
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
 
 	ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1320,15 +1537,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
 		return ret;
 
 	/* use scaler when colorkey is not required */
-	if (!plane_state->ckey.flags) {
-		const struct drm_framebuffer *fb = plane_state->base.fb;
-
+	if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
 		min_scale = 1;
-		max_scale = skl_max_scale(crtc_state,
-					  fb ? fb->format->format : 0);
-	} else {
-		min_scale = DRM_PLANE_HELPER_NO_SCALING;
-		max_scale = DRM_PLANE_HELPER_NO_SCALING;
+		max_scale = skl_max_scale(crtc_state, fb->format->format);
 	}
 
 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1349,10 +1560,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
+	ret = skl_plane_check_nv12_rotation(plane_state);
+	if (ret)
+		return ret;
+
 	ret = skl_check_plane_surface(plane_state);
 	if (ret)
 		return ret;
 
+	/*
+	 * HW only has 8 bits of alpha precision, so disable the plane
+	 * if it would be invisible at that precision.
+	 */
+	if (!(plane_state->base.alpha >> 8))
+		plane_state->base.visible = false;
+
 	plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1517,24 +1736,30 @@ static const uint32_t vlv_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
@@ -1739,8 +1964,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
 	.format_mod_supported = skl_plane_format_mod_supported,
 };
 
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-		       enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, enum plane_id plane_id)
+{
+	if (!HAS_FBC(dev_priv))
+		return false;
+
+	return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+				 enum pipe pipe, enum plane_id plane_id)
+{
+	if (INTEL_GEN(dev_priv) >= 11)
+		return plane_id <= PLANE_SPRITE3;
+
+	/* Display WA #0870: skl, bxt */
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+		return false;
+
+	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+		return false;
+
+	if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+		return false;
+
+	return true;
+}
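
skl_plane_has_planar() encodes which planes may scan out planar NV12: gen11 allows the first four planes (up to PLANE_SPRITE3), SKL and BXT are excluded entirely by Display WA #0870, the remaining gen9 parts other than GLK lose pipe C, and everything else is limited to the primary plane and sprite 0. A standalone restatement of those rules, as an illustrative sketch rather than the helper itself:

#include <stdbool.h>
#include <stdio.h>

static bool has_planar(int gen, bool is_skl_or_bxt, bool is_glk,
		       int pipe, int plane)
{
	if (gen >= 11)
		return plane <= 3;	/* up to PLANE_SPRITE3 */
	if (is_skl_or_bxt)
		return false;		/* Display WA #0870 */
	if (gen == 9 && !is_glk && pipe == 2)
		return false;		/* no planar NV12 on pipe C */
	return plane <= 1;		/* primary or sprite 0 only */
}

int main(void)
{
	printf("ICL plane 3:        %d\n", has_planar(11, false, false, 0, 3));
	printf("GLK pipe C plane 0: %d\n", has_planar(9, false, true, 2, 0));
	printf("SKL plane 0:        %d\n", has_planar(9, true, false, 0, 0));
	return 0;
}
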
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, enum plane_id plane_id)
 {
 	if (plane_id == PLANE_CURSOR)
 		return false;
@@ -1757,109 +2010,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 }
 
 struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
-			  enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, enum plane_id plane_id)
 {
-	struct intel_plane *intel_plane = NULL;
-	struct intel_plane_state *state = NULL;
-	const struct drm_plane_funcs *plane_funcs;
-	unsigned long possible_crtcs;
-	const uint32_t *plane_formats;
-	const uint64_t *modifiers;
+	struct intel_plane *plane;
+	enum drm_plane_type plane_type;
 	unsigned int supported_rotations;
-	int num_plane_formats;
+	unsigned int possible_crtcs;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
 	int ret;
 
-	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
-	if (!intel_plane) {
-		ret = -ENOMEM;
-		goto fail;
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	plane->pipe = pipe;
+	plane->id = plane_id;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+	plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+	if (plane->has_fbc) {
+		struct intel_fbc *fbc = &dev_priv->fbc;
+
+		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
 	}
 
-	state = intel_create_plane_state(&intel_plane->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
+	plane->max_stride = skl_plane_max_stride;
+	plane->update_plane = skl_update_plane;
+	plane->disable_plane = skl_disable_plane;
+	plane->get_hw_state = skl_plane_get_hw_state;
+	plane->check_plane = skl_plane_check;
+	if (icl_is_nv12_y_plane(plane_id))
+		plane->update_slave = icl_update_slave;
+
+	if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+		formats = skl_planar_formats;
+		num_formats = ARRAY_SIZE(skl_planar_formats);
+	} else {
+		formats = skl_plane_formats;
+		num_formats = ARRAY_SIZE(skl_plane_formats);
 	}
-	intel_plane->base.state = &state->base;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		state->scaler_id = -1;
+	plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+	if (plane->has_ccs)
+		modifiers = skl_plane_format_modifiers_ccs;
+	else
+		modifiers = skl_plane_format_modifiers_noccs;
 
-		intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-							 PLANE_SPRITE0 + plane);
+	if (plane_id == PLANE_PRIMARY)
+		plane_type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-		intel_plane->max_stride = skl_plane_max_stride;
-		intel_plane->update_plane = skl_update_plane;
-		intel_plane->disable_plane = skl_disable_plane;
-		intel_plane->get_hw_state = skl_plane_get_hw_state;
-		intel_plane->check_plane = skl_plane_check;
+	possible_crtcs = BIT(pipe);
 
-		if (skl_plane_has_planar(dev_priv, pipe,
-					 PLANE_SPRITE0 + plane)) {
-			plane_formats = skl_planar_formats;
-			num_plane_formats = ARRAY_SIZE(skl_planar_formats);
-		} else {
-			plane_formats = skl_plane_formats;
-			num_plane_formats = ARRAY_SIZE(skl_plane_formats);
-		}
+	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+				       possible_crtcs, &skl_plane_funcs,
+				       formats, num_formats, modifiers,
+				       plane_type,
+				       "plane %d%c", plane_id + 1,
+				       pipe_name(pipe));
+	if (ret)
+		goto fail;
 
-		if (intel_plane->has_ccs)
-			modifiers = skl_plane_format_modifiers_ccs;
-		else
-			modifiers = skl_plane_format_modifiers_noccs;
+	supported_rotations =
+		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
 
-		plane_funcs = &skl_plane_funcs;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		intel_plane->max_stride = i9xx_plane_max_stride;
-		intel_plane->update_plane = vlv_update_plane;
-		intel_plane->disable_plane = vlv_disable_plane;
-		intel_plane->get_hw_state = vlv_plane_get_hw_state;
-		intel_plane->check_plane = vlv_sprite_check;
+	if (INTEL_GEN(dev_priv) >= 10)
+		supported_rotations |= DRM_MODE_REFLECT_X;
 
-		plane_formats = vlv_plane_formats;
-		num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+	drm_plane_create_rotation_property(&plane->base,
+					   DRM_MODE_ROTATE_0,
+					   supported_rotations);
+
+	drm_plane_create_color_properties(&plane->base,
+					  BIT(DRM_COLOR_YCBCR_BT601) |
+					  BIT(DRM_COLOR_YCBCR_BT709),
+					  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+					  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+					  DRM_COLOR_YCBCR_BT709,
+					  DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+	drm_plane_create_alpha_property(&plane->base);
+	drm_plane_create_blend_mode_property(&plane->base,
+					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+					     BIT(DRM_MODE_BLEND_PREMULTI) |
+					     BIT(DRM_MODE_BLEND_COVERAGE));
+
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+	return plane;
+
+fail:
+	intel_plane_free(plane);
+
+	return ERR_PTR(ret);
+}
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, int sprite)
+{
+	struct intel_plane *plane;
+	const struct drm_plane_funcs *plane_funcs;
+	unsigned long possible_crtcs;
+	unsigned int supported_rotations;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
+	int ret;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_SPRITE0 + sprite);
+
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = vlv_update_plane;
+		plane->disable_plane = vlv_disable_plane;
+		plane->get_hw_state = vlv_plane_get_hw_state;
+		plane->check_plane = vlv_sprite_check;
+
+		formats = vlv_plane_formats;
+		num_formats = ARRAY_SIZE(vlv_plane_formats);
 		modifiers = i9xx_plane_format_modifiers;
 
 		plane_funcs = &vlv_sprite_funcs;
 	} else if (INTEL_GEN(dev_priv) >= 7) {
-		intel_plane->max_stride = g4x_sprite_max_stride;
-		intel_plane->update_plane = ivb_update_plane;
-		intel_plane->disable_plane = ivb_disable_plane;
-		intel_plane->get_hw_state = ivb_plane_get_hw_state;
-		intel_plane->check_plane = g4x_sprite_check;
+		plane->max_stride = g4x_sprite_max_stride;
+		plane->update_plane = ivb_update_plane;
+		plane->disable_plane = ivb_disable_plane;
+		plane->get_hw_state = ivb_plane_get_hw_state;
+		plane->check_plane = g4x_sprite_check;
 
-		plane_formats = snb_plane_formats;
-		num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		formats = snb_plane_formats;
+		num_formats = ARRAY_SIZE(snb_plane_formats);
 		modifiers = i9xx_plane_format_modifiers;
 
 		plane_funcs = &snb_sprite_funcs;
 	} else {
-		intel_plane->max_stride = g4x_sprite_max_stride;
-		intel_plane->update_plane = g4x_update_plane;
-		intel_plane->disable_plane = g4x_disable_plane;
-		intel_plane->get_hw_state = g4x_plane_get_hw_state;
-		intel_plane->check_plane = g4x_sprite_check;
+		plane->max_stride = g4x_sprite_max_stride;
+		plane->update_plane = g4x_update_plane;
+		plane->disable_plane = g4x_disable_plane;
+		plane->get_hw_state = g4x_plane_get_hw_state;
+		plane->check_plane = g4x_sprite_check;
 
 		modifiers = i9xx_plane_format_modifiers;
 		if (IS_GEN6(dev_priv)) {
-			plane_formats = snb_plane_formats;
-			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+			formats = snb_plane_formats;
+			num_formats = ARRAY_SIZE(snb_plane_formats);
 
 			plane_funcs = &snb_sprite_funcs;
 		} else {
-			plane_formats = g4x_plane_formats;
-			num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+			formats = g4x_plane_formats;
+			num_formats = ARRAY_SIZE(g4x_plane_formats);
 
 			plane_funcs = &g4x_sprite_funcs;
 		}
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 		supported_rotations =
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 			DRM_MODE_REFLECT_X;
@@ -1868,35 +2185,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
 	}
 
-	intel_plane->pipe = pipe;
-	intel_plane->i9xx_plane = plane;
-	intel_plane->id = PLANE_SPRITE0 + plane;
-	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
+	plane->pipe = pipe;
+	plane->id = PLANE_SPRITE0 + sprite;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-	possible_crtcs = (1 << pipe);
+	possible_crtcs = BIT(pipe);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-					       possible_crtcs, plane_funcs,
-					       plane_formats, num_plane_formats,
-					       modifiers,
-					       DRM_PLANE_TYPE_OVERLAY,
-					       "plane %d%c", plane + 2, pipe_name(pipe));
-	else
-		ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-					       possible_crtcs, plane_funcs,
-					       plane_formats, num_plane_formats,
-					       modifiers,
-					       DRM_PLANE_TYPE_OVERLAY,
-					       "sprite %c", sprite_name(pipe, plane));
+	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+				       possible_crtcs, plane_funcs,
+				       formats, num_formats, modifiers,
+				       DRM_PLANE_TYPE_OVERLAY,
+				       "sprite %c", sprite_name(pipe, sprite));
 	if (ret)
 		goto fail;
 
-	drm_plane_create_rotation_property(&intel_plane->base,
+	drm_plane_create_rotation_property(&plane->base,
 					   DRM_MODE_ROTATE_0,
 					   supported_rotations);
 
-	drm_plane_create_color_properties(&intel_plane->base,
+	drm_plane_create_color_properties(&plane->base,
 					  BIT(DRM_COLOR_YCBCR_BT601) |
 					  BIT(DRM_COLOR_YCBCR_BT709),
 					  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1904,13 +2211,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 					  DRM_COLOR_YCBCR_BT709,
 					  DRM_COLOR_YCBCR_LIMITED_RANGE);
 
-	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
 
-	return intel_plane;
+	return plane;
 
 fail:
-	kfree(state);
-	kfree(intel_plane);
+	intel_plane_free(plane);
 
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb..860f306 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	adjusted_mode->crtc_clock = tv_mode->clock;
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
 	return count;
 }
 
-static void
-intel_tv_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_tv_destroy,
+	.destroy = intel_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81..b34c318 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
 
 		intel_guc_init_params(guc);
 		ret = intel_guc_fw_upload(guc);
-		if (ret == 0 || ret != -EAGAIN)
+		if (ret == 0 || ret != -ETIMEDOUT)
 			break;
 
 		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa..0e3bd58 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
 	return uc_fw->path != NULL;
 }
 
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+	return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+}
+
 static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
 {
-	if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+	if (intel_uc_fw_is_loaded(uc_fw))
 		uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c..9289515 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
-	} else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+	} else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_fallback;
 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf..bf3662a 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
 	ICL_DDC_BUS_PORT_4,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
 #define VBT_DP_MAX_LINK_RATE_HBR3	0
 #define VBT_DP_MAX_LINK_RATE_HBR2	1
 #define VBT_DP_MAX_LINK_RATE_HBR	2
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 0000000..c56ba0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ *         Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+	ROW_INDEX_6BPP = 0,
+	ROW_INDEX_8BPP,
+	ROW_INDEX_10BPP,
+	ROW_INDEX_12BPP,
+	ROW_INDEX_15BPP,
+	MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+	COLUMN_INDEX_8BPC = 0,
+	COLUMN_INDEX_10BPC,
+	COLUMN_INDEX_12BPC,
+	COLUMN_INDEX_14BPC,
+	COLUMN_INDEX_16BPC,
+	MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN		1
+
+/*
+ * Typically constant rc_parameter_set syntax element values, from the
+ * DSC v1.11 spec.
+ */
+static u16 rc_buf_thresh[] = {
+	896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+	7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+	u16 initial_xmit_delay;
+	u8 first_line_bpg_offset;
+	u16 initial_offset;
+	u8 flatness_min_qp;
+	u8 flatness_max_qp;
+	u8 rc_quant_incr_limit0;
+	u8 rc_quant_incr_limit1;
+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+	/* 6BPP/8BPC */
+	{ 768, 15, 6144, 3, 13, 11, 11, {
+		{ 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+		{ 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+		{ 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+		{ 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+		}
+	},
+	/* 6BPP/10BPC */
+	{ 768, 15, 6144, 7, 17, 15, 15, {
+		{ 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+		{ 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+		{ 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+		{ 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+		{ 17, 18, -12 }
+		}
+	},
+	/* 6BPP/12BPC */
+	{ 768, 15, 6144, 11, 21, 19, 19, {
+		{ 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+		{ 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+		{ 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+		{ 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+		{ 21, 22, -12 }
+		}
+	},
+	/* 6BPP/14BPC */
+	{ 768, 15, 6144, 15, 25, 23, 27, {
+		{ 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+		{ 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+		{ 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+		{ 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+		{ 25, 26, -12 }
+		}
+	},
+	/* 6BPP/16BPC */
+	{ 768, 15, 6144, 19, 29, 27, 27, {
+		{ 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+		{ 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+		{ 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+		{ 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+		{ 29, 30, -12 }
+		}
+	},
+},
+{
+	/* 8BPP/8BPC */
+	{ 512, 12, 6144, 3, 12, 11, 11, {
+		{ 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+		{ 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+		}
+	},
+	/* 8BPP/10BPC */
+	{ 512, 12, 6144, 7, 16, 15, 15, {
+		{ 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+		{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+		{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+		}
+	},
+	/* 8BPP/12BPC */
+	{ 512, 12, 6144, 11, 20, 19, 19, {
+		{ 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+		{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+		{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+		{ 21, 23, -12 }
+		}
+	},
+	/* 8BPP/14BPC */
+	{ 512, 12, 6144, 15, 24, 23, 23, {
+		{ 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+		{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+		{ 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+		{ 24, 25, -12 }
+		}
+	},
+	/* 8BPP/16BPC */
+	{ 512, 12, 6144, 19, 28, 27, 27, {
+		{ 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+		{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+		{ 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+		{ 28, 29, -12 }
+		}
+	},
+},
+{
+	/* 10BPP/8BPC */
+	{ 410, 15, 5632, 3, 12, 11, 11, {
+		{ 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+		{ 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+		}
+	},
+	/* 10BPP/10BPC */
+	{ 410, 15, 5632, 7, 16, 15, 15, {
+		{ 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+		{ 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+		{ 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+		}
+	},
+	/* 10BPP/12BPC */
+	{ 410, 15, 5632, 11, 20, 19, 19, {
+		{ 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+		{ 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+		{ 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+		{ 19, 20, -12 }
+		}
+	},
+	/* 10BPP/14BPC */
+	{ 410, 15, 5632, 15, 24, 23, 23, {
+		{ 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+		{ 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+		{ 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+		{ 23, 24, -12 }
+		}
+	},
+	/* 10BPP/16BPC */
+	{ 410, 15, 5632, 19, 28, 27, 27, {
+		{ 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+		{ 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+		{ 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+		{ 27, 28, -12 }
+		}
+	},
+},
+{
+	/* 12BPP/8BPC */
+	{ 341, 15, 2048, 3, 12, 11, 11, {
+		{ 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+		{ 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+		{ 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+		{ 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+		}
+	},
+	/* 12BPP/10BPC */
+	{ 341, 15, 2048, 7, 16, 15, 15, {
+		{ 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+		{ 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+		{ 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+		{ 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+		}
+	},
+	/* 12BPP/12BPC */
+	{ 341, 15, 2048, 11, 20, 19, 19, {
+		{ 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+		{ 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+		{ 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+		{ 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+		{ 21, 23, -12 }
+		}
+	},
+	/* 12BPP/14BPC */
+	{ 341, 15, 2048, 15, 24, 23, 23, {
+		{ 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+		{ 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+		{ 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+		{ 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+		{ 22, 23, -12 }
+		}
+	},
+	/* 12BPP/16BPC */
+	{ 341, 15, 2048, 19, 28, 27, 27, {
+		{ 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+		{ 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+		{ 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+		{ 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+		{ 26, 27, -12 }
+		}
+	},
+},
+{
+	/* 15BPP/8BPC */
+	{ 273, 15, 2048, 3, 12, 11, 11, {
+		{ 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+		{ 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+		{ 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+		{ 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+		}
+	},
+	/* 15BPP/10BPC */
+	{ 273, 15, 2048, 7, 16, 15, 15, {
+		{ 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+		{ 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+		{ 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+		{ 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+		}
+	},
+	/* 15BPP/12BPC */
+	{ 273, 15, 2048, 11, 20, 19, 19, {
+		{ 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+		{ 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+		{ 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+		{ 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+		{ 16, 17, -12 }
+		}
+	},
+	/* 15BPP/14BPC */
+	{ 273, 15, 2048, 15, 24, 23, 23, {
+		{ 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+		{ 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+		{ 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+		{ 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+		{ 20, 21, -12 }
+		}
+	},
+	/* 15BPP/16BPC */
+	{ 273, 15, 2048, 19, 28, 27, 27, {
+		{ 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+		{ 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+		{ 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+		{ 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+		{ 24, 25, -12 }
+		}
+	}
+}
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+	switch (compressed_bpp) {
+	case 6:
+		return ROW_INDEX_6BPP;
+	case 8:
+		return ROW_INDEX_8BPP;
+	case 10:
+		return ROW_INDEX_10BPP;
+	case 12:
+		return ROW_INDEX_12BPP;
+	case 15:
+		return ROW_INDEX_15BPP;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+	switch (bits_per_component) {
+	case 8:
+		return COLUMN_INDEX_8BPC;
+	case 10:
+		return COLUMN_INDEX_10BPC;
+	case 12:
+		return COLUMN_INDEX_12BPC;
+	case 14:
+		return COLUMN_INDEX_14BPC;
+	case 16:
+		return COLUMN_INDEX_16BPC;
+	default:
+		return -EINVAL;
+	}
+}
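
Together the two helpers index the rc_params table above; for example, a compressed bpp of 8 at 8 bpc selects rc_params[ROW_INDEX_8BPP][COLUMN_INDEX_8BPC]. A usage sketch, assuming the table and helpers above are in scope:

/* Illustrative lookup; values quoted from the 8BPP/8BPC entry above. */
static void rc_lookup_example(void)
{
	int row = get_row_index_for_rc_params(8);	/* ROW_INDEX_8BPP    */
	int col = get_column_index_for_rc_params(8);	/* COLUMN_INDEX_8BPC */

	if (row >= 0 && col >= 0) {
		const struct rc_parameters *p = &rc_params[row][col];

		/* For this entry: p->initial_xmit_delay == 512,
		 * p->first_line_bpg_offset == 12 and
		 * p->initial_offset == 6144. */
		(void)p;
	}
}
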
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+	unsigned long groups_per_line = 0;
+	unsigned long groups_total = 0;
+	unsigned long num_extra_mux_bits = 0;
+	unsigned long slice_bits = 0;
+	unsigned long hrd_delay = 0;
+	unsigned long final_scale = 0;
+	unsigned long rbs_min = 0;
+
+	/* Number of groups used to code each line of a slice */
+	groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+				       DSC_RC_PIXELS_PER_GROUP);
+
+	/* Chunk size in bytes */
+	vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+						  vdsc_cfg->bits_per_pixel,
+						  (8 * 16));
+
+	if (vdsc_cfg->convert_rgb)
+		num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+					  (4 * vdsc_cfg->bits_per_component + 4)
+					  - 2);
+	else
+		num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+			(4 * vdsc_cfg->bits_per_component + 4) +
+			2 * (4 * vdsc_cfg->bits_per_component) - 2;
+	/* Number of bits in one Slice */
+	slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+	while ((num_extra_mux_bits > 0) &&
+	       ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+		num_extra_mux_bits--;
+
+	if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+		vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+	/* scale_decrement_interval calculation according to DSC spec 1.11 */
+	if (vdsc_cfg->initial_scale_value > 8)
+		vdsc_cfg->scale_decrement_interval = groups_per_line /
+			(vdsc_cfg->initial_scale_value - 8);
+	else
+		vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+	vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+		(vdsc_cfg->initial_xmit_delay *
+		 vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+	if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+		DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+		return -ERANGE;
+	}
+
+	final_scale = (vdsc_cfg->rc_model_size * 8) /
+		(vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+	if (vdsc_cfg->slice_height > 1)
+		/*
+		 * NflBpgOffset is a 16 bit value with 11 fractional bits,
+		 * hence we multiply by 2^11 to preserve the fractional
+		 * part.
+		 */
+		vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+							(vdsc_cfg->slice_height - 1));
+	else
+		vdsc_cfg->nfl_bpg_offset = 0;
+
+	/* 2^16 - 1 */
+	if (vdsc_cfg->nfl_bpg_offset > 65535) {
+		DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+		return -ERANGE;
+	}
+
+	/* Number of groups used to code the entire slice */
+	groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+	/* slice_bpg_offset is a 16 bit value with 11 fractional bits */
+	vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+						    vdsc_cfg->initial_offset +
+						    num_extra_mux_bits) << 11),
+						  groups_total);
+
+	if (final_scale > 9) {
+		/*
+		 * ScaleIncrementInterval =
+		 * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125));
+		 * since (NflBpgOffset + SliceBpgOffset) carries 11 fractional
+		 * bits, final_offset is scaled by 2^11 to compensate.
+		 */
+		vdsc_cfg->scale_increment_interval =
+				(vdsc_cfg->final_offset * (1 << 11)) /
+				((vdsc_cfg->nfl_bpg_offset +
+				vdsc_cfg->slice_bpg_offset) *
+				(final_scale - 9));
+	} else {
+		/*
+		 * If finalScaleValue is less than or equal to 9, a value of 0 should
+		 * be used to disable the scale increment at the end of the slice
+		 */
+		vdsc_cfg->scale_increment_interval = 0;
+	}
+
+	if (vdsc_cfg->scale_increment_interval > 65535) {
+		DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+		return -ERANGE;
+	}
+
+	/*
+	 * DSC spec mentions that bits_per_pixel specifies the target
+	 * bits/pixel (bpp) rate that is used by the encoder,
+	 * in steps of 1/16 of a bit per pixel
+	 */
+	rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+		DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+			     vdsc_cfg->bits_per_pixel, 16) +
+		groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+	hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+	vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+	vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+	return 0;
+}
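
As a worked pass through the first steps above (illustrative numbers only): for a 1920-pixel-wide slice at a compressed 8 bpp, bits_per_pixel is 8 * 16 = 128 in 1/16th-bit units, so:

/*
 * groups_per_line  = DIV_ROUND_UP(1920, 3)         = 640
 * slice_chunk_size = DIV_ROUND_UP(1920 * 128, 128) = 1920 bytes per line
 * slice_bits       = 8 * 1920 * slice_height
 */
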
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
+{
+	struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+	u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+	u8 i = 0;
+	int row_index = 0;
+	int column_index = 0;
+	u8 line_buf_depth = 0;
+
+	vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+	vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+	vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+					     pipe_config->dsc_params.slice_count);
+	/*
+	 * Slice Height of 8 works for all currently available panels. So start
+	 * with that if pic_height is an integral multiple of 8.
+	 * Eventually add logic to try multiple slice heights.
+	 */
+	if (vdsc_cfg->pic_height % 8 == 0)
+		vdsc_cfg->slice_height = 8;
+	else if (vdsc_cfg->pic_height % 4 == 0)
+		vdsc_cfg->slice_height = 4;
+	else
+		vdsc_cfg->slice_height = 2;
+
+	/* Values filled from DSC Sink DPCD */
+	vdsc_cfg->dsc_version_major =
+		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+	vdsc_cfg->dsc_version_minor =
+		min(DSC_SUPPORTED_VERSION_MIN,
+		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+		DP_DSC_RGB;
+
+	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+	if (!line_buf_depth) {
+		DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+		return -EINVAL;
+	}
+	if (vdsc_cfg->dsc_version_minor == 2)
+		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+	else
+		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+	/* Gen 11 does not support YCbCr */
+	vdsc_cfg->enable422 = false;
+	/* Gen 11 does not support VBR */
+	vdsc_cfg->vbr_enable = false;
+	vdsc_cfg->block_pred_enable =
+			intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+	/* Gen 11 only supports integral values of bpp */
+	vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+	vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+		/*
+		 * Six 0 bits are appended to the LSB of each threshold
+		 * value internally in h/w, so only 8 bits are available
+		 * for programming RcBufThreshold.
+		 */
+		vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+	}
+
+	/*
+	 * For 6bpp, RC buffer thresholds 12 and 13 need different values
+	 * as per the C model.
+	 */
+	if (compressed_bpp == 6) {
+		vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+		vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+	}
+
+	row_index = get_row_index_for_rc_params(compressed_bpp);
+	column_index =
+		get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+	if (row_index < 0 || column_index < 0)
+		return -EINVAL;
+
+	vdsc_cfg->first_line_bpg_offset =
+		rc_params[row_index][column_index].first_line_bpg_offset;
+	vdsc_cfg->initial_xmit_delay =
+		rc_params[row_index][column_index].initial_xmit_delay;
+	vdsc_cfg->initial_offset =
+		rc_params[row_index][column_index].initial_offset;
+	vdsc_cfg->flatness_min_qp =
+		rc_params[row_index][column_index].flatness_min_qp;
+	vdsc_cfg->flatness_max_qp =
+		rc_params[row_index][column_index].flatness_max_qp;
+	vdsc_cfg->rc_quant_incr_limit0 =
+		rc_params[row_index][column_index].rc_quant_incr_limit0;
+	vdsc_cfg->rc_quant_incr_limit1 =
+		rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		vdsc_cfg->rc_range_params[i].range_min_qp =
+			rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+		vdsc_cfg->rc_range_params[i].range_max_qp =
+			rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+		/*
+		 * Range BPG Offset uses 2's complement and is only 6 bits
+		 * wide, so mask it down to 6 bits.
+		 */
+		vdsc_cfg->rc_range_params[i].range_bpg_offset =
+			rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+			DSC_RANGE_BPG_OFFSET_MASK;
+	}
+
+	/*
+	 * BitsPerComponent value determines mux_word_size:
+	 * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits
+	 * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to
+	 * 48 bits
+	 */
+	if (vdsc_cfg->bits_per_component == 8 ||
+	    vdsc_cfg->bits_per_component == 10)
+		vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+	else if (vdsc_cfg->bits_per_component == 12)
+		vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+	/* RC_MODEL_SIZE is a constant across all configurations */
+	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+	/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+	vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+		(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
+
+	return intel_compute_rc_parameters(vdsc_cfg);
+}
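
Two of the derivations above, worked through with the constants in this file (illustrative): threshold programming drops the six implicit LSBs, and initial_scale_value is U3.3 fixed point.

/*
 * rc_buf_thresh:  896 >> 6 = 14 (0x0e), 1792 >> 6 = 28 (0x1c), ...
 *                 8064 >> 6 = 126 (0x7e)
 *
 * initial_scale_value with rc_model_size = 8192 (DSC_RC_MODEL_SIZE_CONST)
 * and initial_offset = 6144 (the 8BPP/8BPC row):
 *   (8192 << 3) / (8192 - 6144) = 65536 / 2048 = 32, i.e. 32/8 = 4.0
 */
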
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+	/*
+	 * On ICL, VDSC/joining on the eDP transcoder uses a separate power
+	 * well (PW2), which requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC
+	 * power domain. For any other transcoder, VDSC/joining uses the
+	 * power well associated with the pipe/transcoder in use, so another
+	 * reference on the transcoder power domain will suffice.
+	 */
+	if (cpu_transcoder == TRANSCODER_EDP)
+		return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+	else
+		return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
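
A hedged caller-side sketch of how the returned domain is meant to be used; intel_display_power_get()/intel_display_power_put() are the existing i915 power-domain helpers, while the surrounding flow (and the dev_priv/crtc_state in scope) is illustrative rather than lifted from this series:

enum intel_display_power_domain domain =
	intel_dsc_power_domain(crtc_state);

intel_display_power_get(dev_priv, domain);
/* ... program and enable the VDSC instance(s) ... */
intel_display_power_put(dev_priv, domain);
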
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+						const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+	enum pipe pipe = crtc->pipe;
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 pps_val = 0;
+	u32 rc_buf_thresh_dword[4];
+	u32 rc_range_params_dword[8];
+	u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+	int i = 0;
+
+	/* Populate PICTURE_PARAMETER_SET_0 registers */
+	pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+		DSC_VER_MIN_SHIFT |
+		vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+		vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+	if (vdsc_cfg->block_pred_enable)
+		pps_val |= DSC_BLOCK_PREDICTION;
+	if (vdsc_cfg->convert_rgb)
+		pps_val |= DSC_COLOR_SPACE_CONVERSION;
+	if (vdsc_cfg->enable422)
+		pps_val |= DSC_422_ENABLE;
+	if (vdsc_cfg->vbr_enable)
+		pps_val |= DSC_VBR_ENABLE;
+	DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_1 registers */
+	pps_val = 0;
+	pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+	DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_2 registers */
+	pps_val = 0;
+	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+	if (encoder->type == INTEL_OUTPUT_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_3 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+		DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+	DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_4 registers */
+	pps_val = 0;
+	pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+		DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+	DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_5 registers */
+	pps_val = 0;
+	pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+		DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+	DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_6 registers */
+	pps_val = 0;
+	pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+		DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+		DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+		DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+	DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_7 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+		DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+	DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_8 registers */
+	pps_val = 0;
+	pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+		DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+	DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_9 registers */
+	pps_val = 0;
+	pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+		DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+	DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_10 registers */
+	pps_val = 0;
+	pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+		DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+		DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+		DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+	DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+				   pps_val);
+	}
+
+	/* Populate PICTURE_PARAMETER_SET_16 registers */
+	pps_val = 0;
+	pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+		DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+				   vdsc_cfg->slice_width) |
+		DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+					vdsc_cfg->slice_height);
+	DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+		/*
+		 * If 2 VDSC instances are needed, configure PPS for second
+		 * VDSC
+		 */
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+	} else {
+		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+		if (crtc_state->dsc_params.dsc_split)
+			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+				   pps_val);
+	}
+
+	/* Populate the RC_BUF_THRESH registers */
+	memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+		rc_buf_thresh_dword[i / 4] |=
+			(u32)(vdsc_cfg->rc_buf_thresh[i] <<
+			      BITS_PER_BYTE * (i % 4));
+		DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+			 rc_buf_thresh_dword[i / 4]);
+	}
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+		I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(DSCC_RC_BUF_THRESH_0,
+				   rc_buf_thresh_dword[0]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+				   rc_buf_thresh_dword[1]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_1,
+				   rc_buf_thresh_dword[2]);
+			I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+				   rc_buf_thresh_dword[3]);
+		}
+	} else {
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+			   rc_buf_thresh_dword[0]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+			   rc_buf_thresh_dword[1]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+			   rc_buf_thresh_dword[2]);
+		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+			   rc_buf_thresh_dword[3]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+				   rc_buf_thresh_dword[0]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+				   rc_buf_thresh_dword[1]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+				   rc_buf_thresh_dword[2]);
+			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+				   rc_buf_thresh_dword[3]);
+		}
+	}
+
+	/* Populate the RC_RANGE_PARAMETERS registers */
+	memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		rc_range_params_dword[i / 2] |=
+			(u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+				RC_BPG_OFFSET_SHIFT) |
+			       (vdsc_cfg->rc_range_params[i].range_max_qp <<
+				RC_MAX_QP_SHIFT) |
+			       (vdsc_cfg->rc_range_params[i].range_min_qp <<
+				RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+		DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+			 rc_range_params_dword[i / 2]);
+	}
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+			   rc_range_params_dword[0]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+			   rc_range_params_dword[1]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+			   rc_range_params_dword[2]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+			   rc_range_params_dword[3]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+			   rc_range_params_dword[4]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+			   rc_range_params_dword[5]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+			   rc_range_params_dword[6]);
+		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+			   rc_range_params_dword[7]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+				   rc_range_params_dword[0]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+				   rc_range_params_dword[1]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+				   rc_range_params_dword[2]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+				   rc_range_params_dword[3]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+				   rc_range_params_dword[4]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+				   rc_range_params_dword[5]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+				   rc_range_params_dword[6]);
+			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+				   rc_range_params_dword[7]);
+		}
+	} else {
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+			   rc_range_params_dword[0]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+			   rc_range_params_dword[1]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+			   rc_range_params_dword[2]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+			   rc_range_params_dword[3]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+			   rc_range_params_dword[4]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+			   rc_range_params_dword[5]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+			   rc_range_params_dword[6]);
+		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+			   rc_range_params_dword[7]);
+		if (crtc_state->dsc_params.dsc_split) {
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+				   rc_range_params_dword[0]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+				   rc_range_params_dword[1]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+				   rc_range_params_dword[2]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+				   rc_range_params_dword[3]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+				   rc_range_params_dword[4]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+				   rc_range_params_dword[5]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+				   rc_range_params_dword[6]);
+			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+				   rc_range_params_dword[7]);
+		}
+	}
+}
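
A stand-alone sketch of the RC_BUF_THRESH byte packing performed above, for reference only (not part of the patch; the sample values are the commonly used DSC spec defaults, already right-shifted by 6 bits as they are stored in the PPS, and the constants mirror the kernel's):

#include <stdint.h>
#include <stdio.h>

#define DSC_NUM_BUF_RANGES 15
#define BITS_PER_BYTE 8

int main(void)
{
	static const uint8_t thresh[DSC_NUM_BUF_RANGES - 1] = {
		14, 28, 42, 56, 70, 84, 98, 105, 112, 119, 121, 123, 125, 126,
	};
	uint32_t dword[4] = { 0 };
	int i;

	/* Four 8-bit thresholds are packed little-endian into each dword. */
	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
		dword[i / 4] |= (uint32_t)thresh[i] << (BITS_PER_BYTE * (i % 4));

	for (i = 0; i < 4; i++)
		printf("RC_BUF_THRESH_%d = 0x%08x\n", i, dword[i]);
	return 0;
}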
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+				       const struct intel_crtc_state *crtc_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+	struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+	/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+	drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+	/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+	drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+	intel_dig_port->write_infoframe(encoder, crtc_state,
+					DP_SDP_PPS, &dp_dsc_pps_sdp,
+					sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+	u32 dss_ctl1_val = 0;
+	u32 dss_ctl2_val = 0;
+
+	if (!crtc_state->dsc_params.compression_enable)
+		return;
+
+	/* Enable Power wells for VDSC/joining */
+	intel_display_power_get(dev_priv,
+				intel_dsc_power_domain(crtc_state));
+
+	intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+	intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+	if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+		dss_ctl1_reg = DSS_CTL1;
+		dss_ctl2_reg = DSS_CTL2;
+	} else {
+		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+	}
+	dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+	if (crtc_state->dsc_params.dsc_split) {
+		dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+		dss_ctl1_val |= JOINER_ENABLE;
+	}
+	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
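
The control-bit outcome of the enable path above can be summarized as follows (annotation only, not part of the patch):

/*
 * mode        DSS_CTL1          DSS_CTL2
 * single      0                 LEFT_BRANCH_VDSC_ENABLE
 * dsc_split   JOINER_ENABLE     LEFT | RIGHT_BRANCH_VDSC_ENABLE
 *
 * i.e. the left branch is always enabled; splitting adds the right
 * branch and the joiner that recombines the two outputs.
 */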
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+	u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+	if (!old_crtc_state->dsc_params.compression_enable)
+		return;
+
+	if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+		dss_ctl1_reg = DSS_CTL1;
+		dss_ctl2_reg = DSS_CTL2;
+	} else {
+		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+	}
+	dss_ctl1_val = I915_READ(dss_ctl1_reg);
+	if (dss_ctl1_val & JOINER_ENABLE)
+		dss_ctl1_val &= ~JOINER_ENABLE;
+	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+	dss_ctl2_val = I915_READ(dss_ctl2_reg);
+	if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+	    dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+		dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+				  RIGHT_BRANCH_VDSC_ENABLE);
+	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+	/* Disable Power wells for VDSC/joining */
+	intel_display_power_put(dev_priv,
+				intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 6e58089..4f41e32 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -53,67 +53,107 @@ static void wa_init_start(struct i915_wa_list *wal, const char *name)
 	wal->name = name;
 }
 
+#define WA_LIST_CHUNK (1 << 4)
+
 static void wa_init_finish(struct i915_wa_list *wal)
 {
+	/* Trim unused entries. */
+	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+		struct i915_wa *list = kmemdup(wal->list,
+					       wal->count * sizeof(*list),
+					       GFP_KERNEL);
+
+		if (list) {
+			kfree(wal->list);
+			wal->list = list;
+		}
+	}
+
 	if (!wal->count)
 		return;
 
 	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
-			 wal->count, wal->name);
+			 wal->wa_count, wal->name);
 }
 
-static void wa_add(struct drm_i915_private *i915,
-		   i915_reg_t reg, const u32 mask, const u32 val)
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 {
-	struct i915_workarounds *wa = &i915->workarounds;
-	unsigned int start = 0, end = wa->count;
-	unsigned int addr = i915_mmio_reg_offset(reg);
-	struct i915_wa_reg *r;
+	unsigned int addr = i915_mmio_reg_offset(wa->reg);
+	unsigned int start = 0, end = wal->count;
+	const unsigned int grow = WA_LIST_CHUNK;
+	struct i915_wa *wa_;
+
+	GEM_BUG_ON(!is_power_of_2(grow));
+
+	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+		struct i915_wa *list;
+
+		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+				     GFP_KERNEL);
+		if (!list) {
+			DRM_ERROR("No space for workaround init!\n");
+			return;
+		}
+
+		if (wal->list)
+			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+		wal->list = list;
+	}
 
 	while (start < end) {
 		unsigned int mid = start + (end - start) / 2;
 
-		if (wa->reg[mid].addr < addr) {
+		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
 			start = mid + 1;
-		} else if (wa->reg[mid].addr > addr) {
+		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
 			end = mid;
 		} else {
-			r = &wa->reg[mid];
+			wa_ = &wal->list[mid];
 
-			if ((mask & ~r->mask) == 0) {
+			if ((wa->mask & ~wa_->mask) == 0) {
 				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
-					  addr, r->mask, r->value);
+					  i915_mmio_reg_offset(wa_->reg),
+					  wa_->mask, wa_->val);
 
-				r->value &= ~mask;
+				wa_->val &= ~wa->mask;
 			}
 
-			r->value |= val;
-			r->mask  |= mask;
+			wal->wa_count++;
+			wa_->val |= wa->val;
+			wa_->mask |= wa->mask;
 			return;
 		}
 	}
 
-	if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
-		DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
-			  addr, mask, val);
-		return;
-	}
+	wal->wa_count++;
+	wa_ = &wal->list[wal->count++];
+	*wa_ = *wa;
 
-	r = &wa->reg[wa->count++];
-	r->addr  = addr;
-	r->value = val;
-	r->mask  = mask;
-
-	while (r-- > wa->reg) {
-		GEM_BUG_ON(r[0].addr == r[1].addr);
-		if (r[1].addr > r[0].addr)
+	while (wa_-- > wal->list) {
+		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+			   i915_mmio_reg_offset(wa_[1].reg));
+		if (i915_mmio_reg_offset(wa_[1].reg) >
+		    i915_mmio_reg_offset(wa_[0].reg))
 			break;
 
-		swap(r[1], r[0]);
+		swap(wa_[1], wa_[0]);
 	}
 }
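
The grow-in-chunks plus sorted-insert scheme used by _wa_add() can be sketched stand-alone (illustrative only; plain ints and realloc() stand in for struct i915_wa and kmalloc_array(), a linear shift stands in for the binary search, and a zero-initialized struct int_list is assumed):

#include <stdlib.h>

#define CHUNK 16 /* mirrors WA_LIST_CHUNK (1 << 4) */

struct int_list {
	int *vals;
	unsigned int count;
};

static int int_list_add(struct int_list *l, int val)
{
	unsigned int i;

	if ((l->count % CHUNK) == 0) { /* either uninitialized or full */
		int *grown = realloc(l->vals,
				     (l->count + CHUNK) * sizeof(*grown));

		if (!grown)
			return -1;
		l->vals = grown;
	}

	/*
	 * Keep ascending order on insert; the kernel version
	 * binary-searches and coalesces duplicate registers instead of
	 * shifting linearly.
	 */
	for (i = l->count; i > 0 && l->vals[i - 1] > val; i--)
		l->vals[i] = l->vals[i - 1];
	l->vals[i] = val;
	l->count++;
	return 0;
}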
 
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
+
+	_wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
 
 #define WA_SET_BIT_MASKED(addr, mask) \
 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -124,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
 	WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
 
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -169,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
 			    GEN6_WIZ_HASHING_MASK,
 			    GEN6_WIZ_HASHING_16x4);
-
-	return 0;
 }
 
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen8_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen8_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -199,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  /* WaForceContextSaveRestoreNonCoherent:bdw */
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
-	return 0;
+			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen8_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen8_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
 	/* Improve HiZ throughput on CHV. */
 	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
-	return 0;
 }
 
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	if (HAS_LLC(dev_priv)) {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
@@ -242,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
 	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
+	if (!IS_COFFEELAKE(i915))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
@@ -285,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  HDC_FORCE_NON_COHERENT);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
-	if (IS_SKYLAKE(dev_priv) ||
-	    IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv))
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -314,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(i915))
 		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
-	return 0;
 }
 
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -332,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+		if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
 			continue;
 
 		/*
@@ -341,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 		 *
 		 * ->    0 <= ss <= 3;
 		 */
-		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+		ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
 		vals[i] = 3 - ss;
 	}
 
 	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
-		return 0;
+		return;
 
 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -356,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 			    GEN9_IZ_HASHING(2, vals[2]) |
 			    GEN9_IZ_HASHING(1, vals[1]) |
 			    GEN9_IZ_HASHING(0, vals[0]));
-
-	return 0;
 }
 
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
-
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
-	return skl_tune_iz_hashing(dev_priv);
+	gen9_ctx_workarounds_init(engine);
+	skl_tune_iz_hashing(engine);
 }
 
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaDisableThreadStallDopClockGating:bxt */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -386,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
 }
 
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
-	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE);
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
 	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	return 0;
 }
 
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:glk */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
 }
 
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	ret = gen9_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
+	gen9_ctx_workarounds_init(engine);
 
 	/* WaToEnableHwFixForPushConstHWBug:cfl */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -445,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
 	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	return 0;
 }
 
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
 	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
 
 	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
 		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
 
 	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -464,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
 
@@ -484,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 
 	/* WaDisableEarlyEOT:cnl */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
-	return 0;
 }
 
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
 		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
 				  PUSH_CONSTANT_DEREF_DISABLE);
 
@@ -509,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	/* Wa_2006611047:icl (pre-prod)
 	 * Formerly known as WaDisableImprovedTdlClkGating
 	 */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
 		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
 				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
 
@@ -518,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			  GEN11_STATE_CACHE_REDIRECT_TO_CS);
 
 	/* Wa_2006665173:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
 		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
 				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
-	return 0;
 }
 
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 {
-	int err = 0;
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	dev_priv->workarounds.count = 0;
+	wa_init_start(wal, "context");
 
-	if (INTEL_GEN(dev_priv) < 8)
-		err = 0;
-	else if (IS_BROADWELL(dev_priv))
-		err = bdw_ctx_workarounds_init(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_ctx_workarounds_init(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		err = skl_ctx_workarounds_init(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_ctx_workarounds_init(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_ctx_workarounds_init(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		err = glk_ctx_workarounds_init(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_ctx_workarounds_init(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_ctx_workarounds_init(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		err = icl_ctx_workarounds_init(dev_priv);
+	if (INTEL_GEN(i915) < 8)
+		return;
+	else if (IS_BROADWELL(i915))
+		bdw_ctx_workarounds_init(engine);
+	else if (IS_CHERRYVIEW(i915))
+		chv_ctx_workarounds_init(engine);
+	else if (IS_SKYLAKE(i915))
+		skl_ctx_workarounds_init(engine);
+	else if (IS_BROXTON(i915))
+		bxt_ctx_workarounds_init(engine);
+	else if (IS_KABYLAKE(i915))
+		kbl_ctx_workarounds_init(engine);
+	else if (IS_GEMINILAKE(i915))
+		glk_ctx_workarounds_init(engine);
+	else if (IS_COFFEELAKE(i915))
+		cfl_ctx_workarounds_init(engine);
+	else if (IS_CANNONLAKE(i915))
+		cnl_ctx_workarounds_init(engine);
+	else if (IS_ICELAKE(i915))
+		icl_ctx_workarounds_init(engine);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
-	if (err)
-		return err;
+		MISSING_CASE(INTEL_GEN(i915));
 
-	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
-			 dev_priv->workarounds.count);
-	return 0;
+	wa_init_finish(wal);
 }
 
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
-	struct i915_workarounds *w = &rq->i915->workarounds;
+	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+	struct i915_wa *wa;
+	unsigned int i;
 	u32 *cs;
-	int ret, i;
+	int ret;
 
-	if (w->count == 0)
+	if (wal->count == 0)
 		return 0;
 
 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
-	cs = intel_ring_begin(rq, (w->count * 2 + 2));
+	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
-	for (i = 0; i < w->count; i++) {
-		*cs++ = w->reg[i].addr;
-		*cs++ = w->reg[i].value;
+	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		*cs++ = i915_mmio_reg_offset(wa->reg);
+		*cs++ = wa->val;
 	}
 	*cs++ = MI_NOOP;
 
@@ -595,32 +603,6 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 }
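
For reference, the command stream assembled by the emission loop above has this shape (a hypothetical user-space sketch; emit_lri(), struct reg_write and the literal 0x22 opcode are illustrative stand-ins and should be checked against the MI_LOAD_REGISTER_IMM definition in i915_reg.h):

#include <stdint.h>

struct reg_write {
	uint32_t addr, val;
};

/*
 * One header dword, then n (offset, value) pairs, then a NOOP pad:
 * n * 2 + 2 dwords total, matching the intel_ring_begin() request.
 */
static uint32_t *emit_lri(uint32_t *cs, const struct reg_write *w,
			  unsigned int n)
{
	unsigned int i;

	*cs++ = (0x22u << 23) | (2 * n - 1); /* MI_LOAD_REGISTER_IMM(n) */
	for (i = 0; i < n; i++) {
		*cs++ = w[i].addr; /* register offset */
		*cs++ = w[i].val;  /* value to load */
	}
	*cs++ = 0; /* MI_NOOP pad, as in the patch */
	return cs;
}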
 
 static void
-wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
-{
-	const unsigned int grow = 1 << 4;
-
-	GEM_BUG_ON(!is_power_of_2(grow));
-
-	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
-		struct i915_wa *list;
-
-		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
-				     GFP_KERNEL);
-		if (!list) {
-			DRM_ERROR("No space for workaround init!\n");
-			return;
-		}
-
-		if (wal->list)
-			memcpy(list, wal->list, sizeof(*wa) * wal->count);
-
-		wal->list = list;
-	}
-
-	wal->list[wal->count++] = *wa;
-}
-
-static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
 	struct i915_wa wa = {
@@ -629,7 +611,7 @@ wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 		.val = _MASKED_BIT_ENABLE(val)
 	};
 
-	wal_add(wal, &wa);
+	_wa_add(wal, &wa);
 }
 
 static void
@@ -642,7 +624,7 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
 		.val = val
 	};
 
-	wal_add(wal, &wa);
+	_wa_add(wal, &wa);
 }
 
 static void
@@ -982,29 +964,54 @@ void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
 	wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
-struct whitelist {
-	i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
-	unsigned int count;
-	u32 nopid;
-};
-
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
 {
-	if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+	if ((cur ^ wa->val) & wa->mask) {
+		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+			  name, from, i915_mmio_reg_offset(wa->reg), cur,
+			  cur & wa->mask, wa->val, wa->mask);
+
+		return false;
+	}
+
+	return true;
+}
+
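
The survival test in wa_verify() reduces to a two-line predicate, shown here as a stand-alone sketch (illustrative only):

#include <stdbool.h>
#include <stdint.h>

/*
 * XOR exposes bits that differ between what was written and what is
 * read back; AND restricts the comparison to the bits the workaround
 * actually owns.
 */
static bool wa_survived(uint32_t cur, uint32_t val, uint32_t mask)
{
	return ((cur ^ val) & mask) == 0;
}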
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+			   const struct i915_wa_list *wal,
+			   const char *from)
+{
+	struct i915_wa *wa;
+	unsigned int i;
+	bool ok = true;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+	return ok;
+}
+
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+				 const char *from)
+{
+	return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
+}
+
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+	struct i915_wa wa = {
+		.reg = reg
+	};
+
+	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
 		return;
 
-	w->reg[w->count++] = reg;
+	_wa_add(wal, &wa);
 }
 
-static void bdw_whitelist_build(struct whitelist *w)
-{
-}
-
-static void chv_whitelist_build(struct whitelist *w)
-{
-}
-
-static void gen9_whitelist_build(struct whitelist *w)
+static void gen9_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
 	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -1016,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
 }
 
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -1024,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 }
 
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -1037,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 
@@ -1045,37 +1052,41 @@ static void glk_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
 }
 
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
 {
 	gen9_whitelist_build(w);
 }
 
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
 	whitelist_reg(w, GEN8_CS_CHICKEN1);
 }
 
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
 {
+	/* WaAllowUMDToModifyHalfSliceChicken7:icl */
+	whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+	/* WaAllowUMDToModifySamplerMode:icl */
+	whitelist_reg(w, GEN10_SAMPLER_MODE);
 }
 
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
-					 struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *w = &engine->whitelist;
 
 	GEM_BUG_ON(engine->id != RCS);
 
-	w->count = 0;
-	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+	wa_init_start(w, "whitelist");
 
 	if (INTEL_GEN(i915) < 8)
-		return NULL;
+		return;
 	else if (IS_BROADWELL(i915))
-		bdw_whitelist_build(w);
+		return;
 	else if (IS_CHERRYVIEW(i915))
-		chv_whitelist_build(w);
+		return;
 	else if (IS_SKYLAKE(i915))
 		skl_whitelist_build(w);
 	else if (IS_BROXTON(i915))
@@ -1093,37 +1104,30 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 
-	return w;
+	wa_init_finish(w);
 }
 
-static void whitelist_apply(struct intel_engine_cs *engine,
-			    const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	const struct i915_wa_list *wal = &engine->whitelist;
 	const u32 base = engine->mmio_base;
+	struct i915_wa *wa;
 	unsigned int i;
 
-	if (!w)
+	if (!wal->count)
 		return;
 
-	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
-	for (i = 0; i < w->count; i++)
-		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
-			      i915_mmio_reg_offset(w->reg[i]));
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+			   i915_mmio_reg_offset(wa->reg));
 
 	/* And clear the rest just in case of garbage */
 	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
-		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+			   i915_mmio_reg_offset(RING_NOPID(base)));
 
-	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
-}
-
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
-{
-	struct whitelist w;
-
-	whitelist_apply(engine, whitelist_build(engine, &w));
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
 static void rcs_engine_wa_init(struct intel_engine_cs *engine)
@@ -1171,17 +1175,19 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
 			    GEN8_L3SQCREG4,
 			    GEN11_LQSC_CLEAN_EVICT_DISABLE);
 
-		/* Wa_1604302699:icl */
-		wa_write_or(wal,
-			    GEN10_L3_CHICKEN_MODE_REGISTER,
-			    GEN11_I2M_WRITE_DISABLE);
-
 		/* WaForwardProgressSoftReset:icl */
 		wa_write_or(wal,
 			    GEN10_SCRATCH_LNCF2,
 			    PMFLUSHDONE_LNICRSDROP |
 			    PMFLUSH_GAPL3UNBLOCK |
 			    PMFLUSHDONE_LNEBLK);
+
+		/* Wa_1406609255:icl (pre-prod) */
+		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+			wa_write_or(wal,
+				    GEN7_SARCHKMD,
+				    GEN7_DISABLE_DEMAND_PREFETCH |
+				    GEN7_DISABLE_SAMPLER_PREFETCH);
 	}
 
 	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
@@ -1267,5 +1273,11 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+					    const char *from)
+{
+	return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
 #include "selftests/intel_workarounds.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 979695a..7c73471 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -19,6 +19,7 @@ struct i915_wa_list {
 	const char	*name;
 	struct i915_wa	*list;
 	unsigned int	count;
+	unsigned int	wa_count;
 };
 
 static inline void intel_wa_list_free(struct i915_wa_list *wal)
@@ -27,13 +28,16 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
 	memset(wal, 0, sizeof(*wal));
 }
 
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
 
 void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
 void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+				 const char *from);
 
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
 
 void intel_engine_init_workarounds(struct intel_engine_cs *engine);
 void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5c22f2c..26c065c 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	n = 0;
 	for_each_engine(engine, i915, id) {
 		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n", id);
+			pr_info("store-dword-imm not supported on engine=%u\n",
+				id);
 			continue;
 		}
 		engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		engine = engines[order[i] % n];
 		i = (i + 1) % (n * I915_NUM_ENGINES);
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+		/*
+		 * In order to utilize 64K pages we need to both pad the vma
+		 * size and ensure the vma offset is at the start of the pt
+		 * boundary; however, to improve coverage, we opt for testing
+		 * both aligned and unaligned offsets.
+		 */
+		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+			offset_low = round_down(offset_low,
+						I915_GTT_PAGE_SIZE_2M);
+
+		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+				       dword, num + 1);
 		if (err)
 			break;
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+				       dword, num + 1);
 		if (err)
 			break;
 
 		if (igt_timeout(end_time,
 				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-				__func__, engine->id, offset_low, offset_high, max_page_size))
+				__func__, engine->id, offset_low, offset_high,
+				max_page_size))
 			break;
 	}
 
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * huge-gtt-pages.
 	 */
 
-	if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+	if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
 		pr_info("48b PPGTT not supported, skipping\n");
 		return 0;
 	}
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
 		SUBTEST(igt_mock_ppgtt_huge_fill),
 		SUBTEST(igt_mock_ppgtt_64K),
 	};
-	int saved_ppgtt = i915_modparams.enable_ppgtt;
 	struct drm_i915_private *dev_priv;
-	struct pci_dev *pdev;
 	struct i915_hw_ppgtt *ppgtt;
+	struct pci_dev *pdev;
 	int err;
 
 	dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
 		return -ENOMEM;
 
 	/* Pretend to be a device which supports the 48b PPGTT */
-	i915_modparams.enable_ppgtt = 3;
+	mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
 
 	pdev = dev_priv->drm.pdev;
 	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ int i915_gem_huge_page_mock_selftests(void)
 
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	i915_modparams.enable_ppgtt = saved_ppgtt;
-
 	drm_dev_put(&dev_priv->drm);
 
 	return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 	struct i915_gem_context *ctx;
 	int err;
 
-	if (!USES_PPGTT(dev_priv)) {
+	if (!HAS_PPGTT(dev_priv)) {
 		pr_info("PPGTT not supported, skipping live-selftests\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25a..7d82043 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
 	const char *func;
 	const char *name;
 
-	unsigned int reset_count;
+	unsigned int reset_global;
+	unsigned int reset_engine[I915_NUM_ENGINES];
 };
 
 static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
 			   const char *func,
 			   const char *name)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int err;
 
 	t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
 	}
 
 	i915->gpu_error.missed_irq_rings = 0;
-	t->reset_count = i915_reset_count(&i915->gpu_error);
+	t->reset_global = i915_reset_count(&i915->gpu_error);
+
+	for_each_engine(engine, i915, id)
+		t->reset_engine[id] =
+			i915_reset_engine_count(&i915->gpu_error, engine);
 
 	return 0;
 }
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
 static int end_live_test(struct live_test *t)
 {
 	struct drm_i915_private *i915 = t->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		return -EIO;
 
-	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+	if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
 		pr_err("%s(%s): GPU was reset %d times!\n",
 		       t->func, t->name,
-		       i915_reset_count(&i915->gpu_error) - t->reset_count);
+		       i915_reset_count(&i915->gpu_error) - t->reset_global);
+		return -EIO;
+	}
+
+	for_each_engine(engine, i915, id) {
+		if (t->reset_engine[id] ==
+		    i915_reset_engine_count(&i915->gpu_error, engine))
+			continue;
+
+		pr_err("%s(%s): engine '%s' was reset %d times!\n",
+		       t->func, t->name, engine->name,
+		       i915_reset_engine_count(&i915->gpu_error, engine) -
+		       t->reset_engine[id]);
 		return -EIO;
 	}
 
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj = NULL;
+	unsigned long ncontexts, ndwords, dw;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
-	unsigned long ncontexts, ndwords, dw;
-	bool first_shared_gtt = true;
+	struct live_test t;
 	int err = -ENODEV;
 
 	/*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 
+	err = begin_live_test(&t, i915, __func__, "");
+	if (err)
+		goto out_unlock;
+
 	ncontexts = 0;
 	ndwords = 0;
 	dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
 		struct i915_gem_context *ctx;
 		unsigned int id;
 
-		if (first_shared_gtt) {
-			ctx = __create_hw_context(i915, file->driver_priv);
-			first_shared_gtt = false;
-		} else {
-			ctx = i915_gem_create_context(i915, file->driver_priv);
-		}
+		ctx = i915_gem_create_context(i915, file->driver_priv);
 		if (IS_ERR(ctx)) {
 			err = PTR_ERR(ctx);
 			goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
 	}
 
 out_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (end_live_test(&t))
 		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj = NULL;
+	struct i915_gem_context *ctx;
+	struct i915_hw_ppgtt *ppgtt;
+	unsigned long ndwords, dw;
 	struct drm_file *file;
 	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
-	struct i915_gem_context *ctx;
-	struct i915_hw_ppgtt *ppgtt;
-	unsigned long ndwords, dw;
+	struct live_test t;
 	int err = -ENODEV;
 
 	/*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 
+	err = begin_live_test(&t, i915, __func__, "");
+	if (err)
+		goto out_unlock;
+
 	ctx = i915_gem_create_context(i915, file->driver_priv);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
 	}
 
 out_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (end_live_test(&t))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	mock_file_free(i915, file);
+	return err;
+}
+
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
+	struct drm_mm_node *node =
+		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+					offset, offset + sizeof(u32) - 1);
+	if (!node || node->start > offset)
+		return 0;
+
+	GEM_BUG_ON(offset >= node->start + node->size);
+
+	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+	       upper_32_bits(offset), lower_32_bits(offset));
+	return -EINVAL;
+}
+
+static int write_to_scratch(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
+			    u64 offset, u32 value)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
+	if (INTEL_GEN(i915) >= 8) {
+		*cmd++ = lower_32_bits(offset);
+		*cmd++ = upper_32_bits(offset);
+	} else {
+		*cmd++ = 0;
+		*cmd++ = offset;
+	}
+	*cmd++ = value;
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		goto err;
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+	if (err)
+		goto err;
+
+	err = check_scratch(ctx, offset);
+	if (err)
+		goto err_unpin;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	if (err)
+		goto err_request;
+
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto skip_request;
+
+	i915_gem_object_set_active_reference(obj);
+	i915_vma_unpin(vma);
+	i915_vma_close(vma);
+
+	i915_request_add(rq);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
+err_unpin:
+	i915_vma_unpin(vma);
+err:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static int read_from_scratch(struct i915_gem_context *ctx,
+			     struct intel_engine_cs *engine,
+			     u64 offset, u32 *value)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
+	const u32 result = 0x100;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	memset(cmd, POISON_INUSE, PAGE_SIZE);
+	if (INTEL_GEN(i915) >= 8) {
+		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = lower_32_bits(offset);
+		*cmd++ = upper_32_bits(offset);
+		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = result;
+		*cmd++ = 0;
+	} else {
+		*cmd++ = MI_LOAD_REGISTER_MEM;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = offset;
+		*cmd++ = MI_STORE_REGISTER_MEM;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = result;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		goto err;
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+	if (err)
+		goto err;
+
+	err = check_scratch(ctx, offset);
+	if (err)
+		goto err_unpin;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	if (err)
+		goto err_request;
+
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
+
+	i915_vma_unpin(vma);
+	i915_vma_close(vma);
+
+	i915_request_add(rq);
+
+	err = i915_gem_object_set_to_cpu_domain(obj, false);
+	if (err)
+		goto err;
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	*value = cmd[result / sizeof(*cmd)];
+	i915_gem_object_unpin_map(obj);
+	i915_gem_object_put(obj);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
+err_unpin:
+	i915_vma_unpin(vma);
+err:
+	i915_gem_object_put(obj);
+	return err;
+}
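
The batch built above moves the value in two hops, which can be summarized as (annotation, not part of the patch):

/*
 * MI_LOAD_REGISTER_MEM:  GPR0 <- *(offset)         via the ctx's ppgtt
 * MI_STORE_REGISTER_MEM: *(batch + result) <- GPR0
 *
 * The CPU then maps the batch object and reads the value back at
 * cmd[result / sizeof(*cmd)], so a non-zero readback in ctx_b would
 * prove the write from ctx_a leaked across address spaces.
 */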
+
+static int igt_vm_isolation(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_a, *ctx_b;
+	struct intel_engine_cs *engine;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	unsigned long count;
+	struct live_test t;
+	unsigned int id;
+	u64 vm_total;
+	int err;
+
+	if (INTEL_GEN(i915) < 7)
+		return 0;
+
+	/*
+	 * The simple goal here is that a write into one context is not
+	 * observed in a second (separate page tables and scratch).
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	err = begin_live_test(&t, i915, __func__, "");
+	if (err)
+		goto out_unlock;
+
+	ctx_a = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx_a)) {
+		err = PTR_ERR(ctx_a);
+		goto out_unlock;
+	}
+
+	ctx_b = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx_b)) {
+		err = PTR_ERR(ctx_b);
+		goto out_unlock;
+	}
+
+	/* We can only test vm isolation if the vms are distinct */
+	if (ctx_a->ppgtt == ctx_b->ppgtt)
+		goto out_unlock;
+
+	vm_total = ctx_a->ppgtt->vm.total;
+	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+	vm_total -= I915_GTT_PAGE_SIZE;
+
+	intel_runtime_pm_get(i915);
+
+	count = 0;
+	for_each_engine(engine, i915, id) {
+		IGT_TIMEOUT(end_time);
+		unsigned long this = 0;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		while (!__igt_timeout(end_time, NULL)) {
+			u32 value = 0xc5c5c5c5;
+			u64 offset;
+
+			div64_u64_rem(i915_prandom_u64_state(&prng),
+				      vm_total, &offset);
+			offset &= -sizeof(u32);
+			offset += I915_GTT_PAGE_SIZE;
+
+			err = write_to_scratch(ctx_a, engine,
+					       offset, 0xdeadbeef);
+			if (err == 0)
+				err = read_from_scratch(ctx_b, engine,
+							offset, &value);
+			if (err)
+				goto out_rpm;
+
+			if (value) {
+				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+				       engine->name, value,
+				       upper_32_bits(offset),
+				       lower_32_bits(offset),
+				       this);
+				err = -EINVAL;
+				goto out_rpm;
+			}
+
+			this++;
+		}
+		count += this;
+	}
+	pr_info("Checked %lu scratch offsets across %d engines\n",
+		count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+	intel_runtime_pm_put(i915);
+out_unlock:
+	if (end_live_test(&t))
 		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
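
A note on the offset masking in the loop above: negating the size yields the alignment mask (a stand-alone sketch, assuming 64-bit offsets):

#include <stdint.h>

/*
 * In two's complement, -(uint64_t)sizeof(uint32_t) == ~3ull, so the AND
 * clears the low two bits and rounds down to a u32-aligned address;
 * by contrast, ~sizeof(u32) would clear only bit 2 and could leave a
 * misaligned offset behind.
 */
static uint64_t align_to_u32(uint64_t offset)
{
	return offset & -(uint64_t)sizeof(uint32_t);
}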
 
@@ -865,33 +1207,6 @@ static int igt_switch_to_kernel_context(void *arg)
 	return err;
 }
 
-static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
-{
-	struct drm_i915_gem_object *obj;
-	int err;
-
-	err = i915_gem_init_aliasing_ppgtt(i915);
-	if (err)
-		return err;
-
-	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
-		struct i915_vma *vma;
-
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
-		if (IS_ERR(vma))
-			continue;
-
-		vma->flags &= ~I915_VMA_LOCAL_BIND;
-	}
-
-	return 0;
-}
-
-static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
-{
-	i915_gem_fini_aliasing_ppgtt(i915);
-}
-
 int i915_gem_context_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 		SUBTEST(live_nop_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
+		SUBTEST(igt_vm_isolation),
 	};
-	bool fake_alias = false;
-	int err;
 
 	if (i915_terminally_wedged(&dev_priv->gpu_error))
 		return 0;
 
-	/* Install a fake aliasing gtt for exercise */
-	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		err = fake_aliasing_ppgtt_enable(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		if (err)
-			return err;
-
-		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
-		fake_alias = true;
-	}
-
-	err = i915_subtests(tests, dev_priv);
-
-	if (fake_alias) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		fake_aliasing_ppgtt_disable(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-	}
-
-	return err;
+	return i915_subtests(tests, dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1c..4365979 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
 	 * where the GTT space of the request is separate from the GGTT
 	 * allocation required to build the request.
 	 */
-	if (!USES_FULL_PPGTT(i915))
+	if (!HAS_FULL_PPGTT(i915))
 		return 0;
 
 	mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 127d815..69fe86b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
 
 	/* Allocate a ppgtt and try to fill the entire range */
 
-	if (!USES_PPGTT(dev_priv))
+	if (!HAS_PPGTT(dev_priv))
 		return 0;
 
 	ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 	IGT_TIMEOUT(end_time);
 	int err;
 
-	if (!USES_FULL_PPGTT(dev_priv))
+	if (!HAS_FULL_PPGTT(dev_priv))
 		return 0;
 
 	file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 0000000..208a966
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	pr_debug("%s: current gpu_error=%08lx\n",
+		 __func__, i915->gpu_error.flags);
+
+	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+		wait_event(i915->gpu_error.reset_queue,
+			   !test_bit(I915_RESET_BACKOFF,
+				     &i915->gpu_error.flags));
+
+	for_each_engine(engine, i915, id) {
+		while (test_and_set_bit(I915_RESET_ENGINE + id,
+					&i915->gpu_error.flags))
+			wait_on_bit(&i915->gpu_error.flags,
+				    I915_RESET_ENGINE + id,
+				    TASK_UNINTERRUPTIBLE);
+	}
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id)
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+	wake_up_all(&i915->gpu_error.reset_queue);
+}
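
The lock/unlock pair above is a bit-lock built on test_and_set_bit() and a waitqueue; a rough user-space analogue using C11 atomics (a sketch only, with a busy yield in place of the kernel's sleeping wait):

#include <sched.h>
#include <stdatomic.h>

static atomic_uint error_flags;

#define RESET_BACKOFF 0u

static void reset_lock(void)
{
	/* Loop until our fetch_or is the one that set the bit. */
	while (atomic_fetch_or(&error_flags, 1u << RESET_BACKOFF) &
	       (1u << RESET_BACKOFF))
		sched_yield(); /* the kernel sleeps on reset_queue instead */
}

static void reset_unlock(void)
{
	/* Clear the bit; the kernel also wakes reset_queue waiters. */
	atomic_fetch_and(&error_flags, ~(1u << RESET_BACKOFF));
}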
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 0000000..5f0234d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 0000000..8cd34f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+	unsigned int mode;
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+	memset(spin, 0, sizeof(*spin));
+	spin->i915 = i915;
+
+	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->hws)) {
+		err = PTR_ERR(spin->hws);
+		goto err;
+	}
+
+	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->obj)) {
+		err = PTR_ERR(spin->obj);
+		goto err_hws;
+	}
+
+	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+	mode = i915_coherent_map_type(i915);
+	vaddr = i915_gem_object_pin_map(spin->obj, mode);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_unpin_hws;
+	}
+	spin->batch = vaddr;
+
+	return 0;
+
+err_unpin_hws:
+	i915_gem_object_unpin_map(spin->hws);
+err_obj:
+	i915_gem_object_put(spin->obj);
+err_hws:
+	i915_gem_object_put(spin->hws);
+err:
+	return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+	return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+		       const struct i915_request *rq)
+{
+	return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+			      struct i915_request *rq,
+			      u32 arbitration_command)
+{
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	struct i915_vma *hws, *vma;
+	u32 *batch;
+	int err;
+
+	vma = i915_vma_instance(spin->obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	hws = i915_vma_instance(spin->hws, vm, NULL);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		return err;
+
+	err = i915_vma_pin(hws, 0, 0, PIN_USER);
+	if (err)
+		goto unpin_vma;
+
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(hws->obj)) {
+		i915_gem_object_get(hws->obj);
+		i915_gem_object_set_active_reference(hws->obj);
+	}
+
+	batch = spin->batch;
+
+	*batch++ = MI_STORE_DWORD_IMM_GEN4;
+	*batch++ = lower_32_bits(hws_address(hws, rq));
+	*batch++ = upper_32_bits(hws_address(hws, rq));
+	*batch++ = rq->fence.seqno;
+
+	*batch++ = arbitration_command;
+
+	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	*batch++ = lower_32_bits(vma->node.start);
+	*batch++ = upper_32_bits(vma->node.start);
+	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+	i915_gem_chipset_flush(spin->i915);
+
+	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+	i915_vma_unpin(hws);
+unpin_vma:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = emit_recurse_batch(spin, rq, arbitration_command);
+	if (err) {
+		i915_request_add(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+	return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+	*spin->batch = MI_BATCH_BUFFER_END;
+	i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+	igt_spinner_end(spin);
+
+	i915_gem_object_unpin_map(spin->obj);
+	i915_gem_object_put(spin->obj);
+
+	i915_gem_object_unpin_map(spin->hws);
+	i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+	if (!wait_event_timeout(rq->execute,
+				READ_ONCE(rq->global_seqno),
+				msecs_to_jiffies(10)))
+		return false;
+
+	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+					       rq->fence.seqno),
+			     10) &&
+		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+					    rq->fence.seqno),
+			  1000));
+}
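
The spinner parks an engine in an infinite MI_BATCH_BUFFER_START loop after
an MI_STORE_DWORD_IMM has published the request's seqno into a per-context
slot of the HWS page: seqno_offset() maps fence context N to byte offset
offset_in_page(sizeof(u32) * N), so context 5, say, is polled at byte 20.
A minimal lifecycle sketch, matching how the execlists tests below consume
the API (error handling elided):

	struct igt_spinner spin;
	struct i915_request *rq;

	if (igt_spinner_init(&spin, i915))
		return -ENOMEM;

	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))
		goto wedged;	/* spinner never started */

	igt_spinner_end(&spin);		/* rewrite the loop to MI_BATCH_BUFFER_END */
	igt_spinner_fini(&spin);
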
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 0000000..391777c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+	struct drm_i915_private *i915;
+	struct drm_i915_gem_object *hws;
+	struct drm_i915_gem_object *obj;
+	u32 *batch;
+	void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82..32cba4c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
 	 * Get rid of clients created during driver load because the test will
 	 * recreate them.
 	 */
+	guc_clients_disable(guc);
 	guc_clients_destroy(guc);
 	if (guc->execbuf_client || guc->preempt_client) {
 		pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
 		goto out;
 	}
 
-	/* Now create the doorbells */
-	guc_clients_doorbell_init(guc);
+	/* Now enable the clients */
+	guc_clients_enable(guc);
 
 	/* each client should now have received a doorbell */
 	if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
 	 * Basic test - an attempt to reallocate a valid doorbell to the
 	 * client to which it is currently assigned should not cause a failure.
 	 */
-	err = guc_clients_doorbell_init(guc);
-	if (err)
-		goto out;
-
-	/*
-	 * Negative test - a client with no doorbell (invalid db id).
-	 * After destroying the doorbell, the db id is changed to
-	 * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
-	 * allocate a doorbell with an invalid id (db has to be reserved before
-	 * allocation).
-	 */
-	destroy_doorbell(guc->execbuf_client);
-	if (client_doorbell_in_sync(guc->execbuf_client)) {
-		pr_err("destroy db did not work\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	unreserve_doorbell(guc->execbuf_client);
-
-	__create_doorbell(guc->execbuf_client);
-	err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
-	if (err != -EIO) {
-		pr_err("unexpected (err = %d)", err);
-		goto out_db;
-	}
-
-	if (!available_dbs(guc, guc->execbuf_client->priority)) {
-		pr_err("doorbell not available when it should\n");
-		err = -EIO;
-		goto out_db;
-	}
-
-out_db:
-	/* clean after test */
-	__destroy_doorbell(guc->execbuf_client);
-	err = reserve_doorbell(guc->execbuf_client);
-	if (err) {
-		pr_err("failed to reserve back the doorbell back\n");
-	}
 	err = create_doorbell(guc->execbuf_client);
-	if (err) {
-		pr_err("recreate doorbell failed\n");
-		goto out;
-	}
 
 out:
 	/*
 	 * Leave a clean state for the other tests; the driver always destroys
 	 * the clients during unload.
 	 */
-	destroy_doorbell(guc->execbuf_client);
-	if (guc->preempt_client)
-		destroy_doorbell(guc->preempt_client);
+	guc_clients_disable(guc);
 	guc_clients_destroy(guc);
 	guc_clients_create(guc);
-	guc_clients_doorbell_init(guc);
+	guc_clients_enable(guc);
 unlock:
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
 
 		db_id = clients[i]->doorbell_id;
 
-		err = create_doorbell(clients[i]);
+		err = __guc_client_enable(clients[i]);
 		if (err) {
 			pr_err("[%d] Failed to create a doorbell\n", i);
 			goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
 out:
 	for (i = 0; i < ATTEMPTS; i++)
 		if (!IS_ERR_OR_NULL(clients[i])) {
-			destroy_doorbell(clients[i]);
+			__guc_client_disable(clients[i]);
 			guc_client_free(clients[i]);
 		}
 unlock:
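
With doorbell setup folded into client enable/disable, the GuC selftests now
drive the full client lifecycle instead of poking doorbells directly.
Roughly, as the updated test above sequences it:

	guc_clients_disable(guc);	/* also tears down the doorbells */
	guc_clients_destroy(guc);

	guc_clients_create(guc);
	guc_clients_enable(guc);	/* each client gets its doorbell back */
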
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db37822..40efbed 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_reset.h"
 #include "igt_wedge_me.h"
 
 #include "mock_context.h"
@@ -76,7 +77,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
 	vaddr = i915_gem_object_pin_map(h->obj,
-					HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+					i915_coherent_map_type(i915));
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_unpin_hws;
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 			return ERR_CAST(obj);
 
 		vaddr = i915_gem_object_pin_map(obj,
-						HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+						i915_coherent_map_type(h->i915));
 		if (IS_ERR(vaddr)) {
 			i915_gem_object_put(obj);
 			return ERR_CAST(vaddr);
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
 		goto unlock;
 
 	for_each_engine(engine, i915, id) {
+		struct igt_wedge_me w;
 		long timeout;
 
 		if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
 
 		i915_request_add(rq);
 
-		timeout = i915_request_wait(rq,
-					    I915_WAIT_LOCKED,
-					    MAX_SCHEDULE_TIMEOUT);
+		timeout = 0;
+		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+			timeout = i915_request_wait(rq,
+						    I915_WAIT_LOCKED,
+						    MAX_SCHEDULE_TIMEOUT);
+		if (i915_terminally_wedged(&i915->gpu_error))
+			timeout = -EIO;
+
 		i915_request_put(rq);
 
 		if (timeout < 0) {
@@ -348,40 +355,6 @@ static int igt_hang_sanitycheck(void *arg)
 	return err;
 }
 
-static void global_reset_lock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	pr_debug("%s: current gpu_error=%08lx\n",
-		 __func__, i915->gpu_error.flags);
-
-	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
-		wait_event(i915->gpu_error.reset_queue,
-			   !test_bit(I915_RESET_BACKOFF,
-				     &i915->gpu_error.flags));
-
-	for_each_engine(engine, i915, id) {
-		while (test_and_set_bit(I915_RESET_ENGINE + id,
-					&i915->gpu_error.flags))
-			wait_on_bit(&i915->gpu_error.flags,
-				    I915_RESET_ENGINE + id,
-				    TASK_UNINTERRUPTIBLE);
-	}
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, i915, id)
-		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
-	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
-	wake_up_all(&i915->gpu_error.reset_queue);
-}
-
 static int igt_global_reset(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
 
 	/* Check that we can issue a global GPU reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
 
 	/* Check that we detect a stuck waiter and issue a reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ static int igt_reset_wait(void *arg)
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
 	/* Check that we can recover an unbind stuck on a hanging request */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1150,6 +1123,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 		tsk = NULL;
 		goto out_reset;
 	}
+	get_task_struct(tsk);
 
 	wait_for_completion(&arg.completion);
 
@@ -1172,6 +1146,8 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 		/* The reset, even indirectly, should take less than 10ms. */
 		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
 			err = kthread_stop(tsk);
+
+		put_task_struct(tsk);
 	}
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -1183,7 +1159,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1263,7 +1239,7 @@ static int igt_reset_queue(void *arg)
 
 	/* Check that we replay pending requests following a hang */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1394,7 +1370,7 @@ static int igt_reset_queue(void *arg)
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
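
Note the new get_task_struct()/put_task_struct() pairing around the evict
thread: kthread_stop() dereferences the task_struct, so the test must hold a
reference in case the kthread exits and is reaped first. A sketch of the
fixed pattern (fn and arg stand in for the test's thread and argument):

	tsk = kthread_run(fn, &arg, "igt/evict_vma");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	get_task_struct(tsk);	/* keep the task alive for kthread_stop() */

	/* ... wait for the thread to block on the hanging request ... */

	err = kthread_stop(tsk);
	put_task_struct(tsk);
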
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8..ca461e3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,215 +6,18 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_spinner.h"
+#include "i915_random.h"
 
 #include "mock_context.h"
 
-struct spinner {
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	u32 *batch;
-	void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
-	unsigned int mode;
-	void *vaddr;
-	int err;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->hws)) {
-		err = PTR_ERR(spin->hws);
-		goto err;
-	}
-
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->obj)) {
-		err = PTR_ERR(spin->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
-err_hws:
-	i915_gem_object_put(spin->hws);
-err:
-	return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
-{
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
-	struct i915_vma *hws, *vma;
-	u32 *batch;
-	int err;
-
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
-
-	err = i915_vma_move_to_active(hws, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
-
-	batch = spin->batch;
-
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
-	*batch++ = rq->fence.seqno;
-
-	*batch++ = arbitration_command;
-
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
-	i915_gem_chipset_flush(spin->i915);
-
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
-	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
-	spinner_end(spin);
-
-	i915_gem_object_unpin_map(spin->obj);
-	i915_gem_object_put(spin->obj);
-
-	i915_gem_object_unpin_map(spin->hws);
-	i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct spinner spin;
+	struct igt_spinner spin;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, i915))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
@@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin, rq)) {
+		if (!igt_wait_for_spinner(&spin, rq)) {
 			GEM_TRACE("spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg)
 			goto err_ctx;
 		}
 
-		spinner_end(&spin);
+		igt_spinner_end(&spin);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
@@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg)
 err_ctx:
 	kernel_context_close(ctx);
 err_spin:
-	spinner_fini(&spin);
+	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -271,7 +74,7 @@ static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -282,34 +85,36 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -317,16 +122,16 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -334,8 +139,8 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -348,9 +153,9 @@ static int live_preempt(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -362,7 +167,7 @@ static int live_late_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
@@ -374,10 +179,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -391,43 +196,44 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			pr_err("First context failed to start\n");
 			goto err_wedged;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_NOOP);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (wait_for_spinner(&spin_hi, rq)) {
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("Second context overtook first?\n");
 			goto err_wedged;
 		}
 
-		attr.priority = I915_PRIORITY_MAX;
+		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
 			GEM_TRACE_DUMP();
 			goto err_wedged;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -440,9 +246,9 @@ static int live_late_preempt(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -450,8 +256,8 @@ static int live_late_preempt(void *arg)
 	return err;
 
 err_wedged:
-	spinner_end(&spin_hi);
-	spinner_end(&spin_lo);
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
 	i915_gem_set_wedged(i915);
 	err = -EIO;
 	goto err_ctx_lo;
@@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
@@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg)
 
 		engine->execlists.preempt_hang.inject_hang = false;
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -563,9 +369,9 @@ static int live_preempt_hang(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -573,6 +379,261 @@ static int live_preempt_hang(void *arg)
 	return err;
 }
 
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+	return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+	struct drm_i915_private *i915;
+	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_object *batch;
+	unsigned int ncontext;
+	struct rnd_state prng;
+	unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+							  &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+			struct i915_gem_context *ctx, int prio,
+			struct drm_i915_gem_object *batch)
+{
+	struct i915_request *rq;
+	struct i915_vma *vma = NULL;
+	int err = 0;
+
+	if (batch) {
+		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		if (err)
+			return err;
+	}
+
+	ctx->sched.priority = prio;
+
+	rq = i915_request_alloc(smoke->engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto unpin;
+	}
+
+	if (vma) {
+		err = rq->engine->emit_bb_start(rq,
+						vma->node.start,
+						PAGE_SIZE, 0);
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+	}
+
+	i915_request_add(rq);
+
+unpin:
+	if (vma)
+		i915_vma_unpin(vma);
+
+	return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		int err;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+	for_each_engine(engine, smoke->i915, id) {
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+		get_task_struct(tsk[id]);
+	}
+
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
+
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
+
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
+
+		count += arg[id].count;
+
+		put_task_struct(tsk[id]);
+	}
+
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+	enum intel_engine_id id;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		for_each_engine(smoke->engine, smoke->i915, id) {
+			struct i915_gem_context *ctx = smoke_context(smoke);
+			int err;
+
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ? smoke->batch : NULL);
+			if (err)
+				return err;
+
+			count++;
+		}
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+	struct preempt_smoke smoke = {
+		.i915 = arg,
+		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+		.ncontext = 1024,
+	};
+	const unsigned int phase[] = { 0, BATCH };
+	int err = -ENOMEM;
+	u32 *cs;
+	int n;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+		return 0;
+
+	smoke.contexts = kmalloc_array(smoke.ncontext,
+				       sizeof(*smoke.contexts),
+				       GFP_KERNEL);
+	if (!smoke.contexts)
+		return -ENOMEM;
+
+	mutex_lock(&smoke.i915->drm.struct_mutex);
+	intel_runtime_pm_get(smoke.i915);
+
+	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+	if (IS_ERR(smoke.batch)) {
+		err = PTR_ERR(smoke.batch);
+		goto err_unlock;
+	}
+
+	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_batch;
+	}
+	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+		cs[n] = MI_ARB_CHECK;
+	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(smoke.batch);
+
+	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+	if (err)
+		goto err_batch;
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		smoke.contexts[n] = kernel_context(smoke.i915);
+		if (!smoke.contexts[n]) {
+			err = -ENOMEM;
+			goto err_ctx;
+		}
+	}
+
+	for (n = 0; n < ARRAY_SIZE(phase); n++) {
+		err = smoke_crescendo(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+
+		err = smoke_random(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+	}
+
+err_ctx:
+	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+		err = -EIO;
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		if (!smoke.contexts[n])
+			break;
+		kernel_context_close(smoke.contexts[n]);
+	}
+
+err_batch:
+	i915_gem_object_put(smoke.batch);
+err_unlock:
+	intel_runtime_pm_put(smoke.i915);
+	mutex_unlock(&smoke.i915->drm.struct_mutex);
+	kfree(smoke.contexts);
+
+	return err;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
@@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_preempt_hang),
+		SUBTEST(live_preempt_smoke),
 	};
 
 	if (!HAS_EXECLISTS(i915))
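
The new smoke test stresses preemption from two directions: smoke_crescendo()
runs one kthread per engine, cycling each context's priority through
count % I915_PRIORITY_MAX, while smoke_random() submits from a single thread
with priorities drawn uniformly from [I915_PRIORITY_MIN, I915_PRIORITY_MAX)
via random_range(). Each strategy runs twice, once with empty requests and
once with an MI_ARB_CHECK-filled batch, roughly:

	const unsigned int phase[] = { 0, BATCH };

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (!err)
			err = smoke_random(&smoke, phase[n]);
		if (err)
			break;
	}
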
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923..67017d5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
 
 #include "../i915_selftest.h"
 
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
 
@@ -91,17 +94,23 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
 {
-	return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+	i915_reg_t reg = i < engine->whitelist.count ?
+			 engine->whitelist.list[i].reg :
+			 RING_NOPID(engine->mmio_base);
+
+	return i915_mmio_reg_offset(reg);
 }
 
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
 {
 	unsigned int i;
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = results[i];
 
 		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
 	}
 }
 
-static int check_whitelist(const struct whitelist *w,
-			   struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
 			   struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
 	}
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = vaddr[i];
 
 		if (expected != actual) {
-			print_results(w, vaddr);
+			print_results(engine, vaddr);
 			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
 			       i, expected, actual);
 
@@ -159,66 +167,107 @@ static int check_whitelist(const struct whitelist *w,
 
 static int do_device_reset(struct intel_engine_cs *engine)
 {
-	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+	set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
 	return 0;
 }
 
 static int do_engine_reset(struct intel_engine_cs *engine)
 {
-	return i915_reset_engine(engine, NULL);
+	return i915_reset_engine(engine, "live_workarounds");
 }
 
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+			  struct igt_spinner *spin)
 {
 	struct i915_gem_context *ctx;
 	struct i915_request *rq;
+	int err = 0;
 
 	ctx = kernel_context(engine->i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
 	intel_runtime_pm_get(engine->i915);
-	rq = i915_request_alloc(engine, ctx);
+
+	if (spin)
+		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+	else
+		rq = i915_request_alloc(engine, ctx);
+
 	intel_runtime_pm_put(engine->i915);
 
 	kernel_context_close(ctx);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+
+	if (IS_ERR(rq)) {
+		spin = NULL;
+		err = PTR_ERR(rq);
+		goto err;
+	}
 
 	i915_request_add(rq);
 
-	return 0;
+	if (spin && !igt_wait_for_spinner(spin, rq)) {
+		pr_err("Spinner failed to start\n");
+		err = -ETIMEDOUT;
+	}
+
+err:
+	if (err && spin)
+		igt_spinner_end(spin);
+
+	return err;
 }
 
 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					int (*reset)(struct intel_engine_cs *),
-					const struct whitelist *w,
 					const char *name)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	bool want_spin = reset == do_engine_reset;
 	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
 	int err;
 
-	ctx = kernel_context(engine->i915);
+	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+		engine->whitelist.count, name);
+
+	if (want_spin) {
+		err = igt_spinner_init(&spin, i915);
+		if (err)
+			return err;
+	}
+
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *before* %s reset!\n", name);
 		goto out;
 	}
 
-	err = switch_to_scratch_context(engine);
+	err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
 	if (err)
 		goto out;
 
+	intel_runtime_pm_get(i915);
 	err = reset(engine);
+	intel_runtime_pm_put(i915);
+
+	if (want_spin) {
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+	}
+
 	if (err) {
 		pr_err("%s reset failed\n", name);
 		goto out;
 	}
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Whitelist not preserved in context across %s reset!\n",
 		       name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 
 	kernel_context_close(ctx);
 
-	ctx = kernel_context(engine->i915);
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
 		       name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine = i915->engine[RCS];
-	struct i915_gpu_error *error = &i915->gpu_error;
-	struct whitelist w;
 	int err = 0;
 
 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
 
-	if (!engine)
+	if (!engine || engine->whitelist.count == 0)
 		return 0;
 
-	if (!whitelist_build(engine, &w))
-		return 0;
-
-	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
-	set_bit(I915_RESET_BACKOFF, &error->flags);
-	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+	igt_global_reset_lock(i915);
 
 	if (intel_has_reset_engine(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_engine_reset, &w,
+						   do_engine_reset,
 						   "engine");
 		if (err)
 			goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
 
 	if (intel_has_gpu_reset(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_device_reset, &w,
+						   do_device_reset,
 						   "device");
 		if (err)
 			goto out;
 	}
 
 out:
-	clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
-	clear_bit(I915_RESET_BACKOFF, &error->flags);
+	igt_global_reset_unlock(i915);
 	return err;
 }
 
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	bool ok = true;
+
+	ok &= intel_gt_verify_workarounds(i915, str);
+
+	for_each_engine(engine, i915, id)
+		ok &= intel_engine_verify_workarounds(engine, str);
+
+	return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gpu_error *error = &i915->gpu_error;
+	bool ok;
+
+	if (!intel_has_gpu_reset(i915))
+		return 0;
+
+	pr_info("Verifying after GPU reset...\n");
+
+	igt_global_reset_lock(i915);
+
+	ok = verify_gt_engine_wa(i915, "before reset");
+	if (!ok)
+		goto out;
+
+	intel_runtime_pm_get(i915);
+	set_bit(I915_RESET_HANDOFF, &error->flags);
+	i915_reset(i915, ALL_ENGINES, "live_workarounds");
+	intel_runtime_pm_put(i915);
+
+	ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+	igt_global_reset_unlock(i915);
+
+	return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
+	enum intel_engine_id id;
+	struct i915_request *rq;
+	int ret = 0;
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	ctx = kernel_context(i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	igt_global_reset_lock(i915);
+
+	for_each_engine(engine, i915, id) {
+		bool ok;
+
+		pr_info("Verifying after %s reset...\n", engine->name);
+
+		ok = verify_gt_engine_wa(i915, "before reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		intel_runtime_pm_get(i915);
+		i915_reset_engine(engine, "live_workarounds");
+		intel_runtime_pm_put(i915);
+
+		ok = verify_gt_engine_wa(i915, "after idle reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		ret = igt_spinner_init(&spin, i915);
+		if (ret)
+			goto err;
+
+		intel_runtime_pm_get(i915);
+
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			goto err;
+		}
+
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			pr_err("Spinner failed to start\n");
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			ret = -ETIMEDOUT;
+			goto err;
+		}
+
+		i915_reset_engine(engine, "live_workarounds");
+
+		intel_runtime_pm_put(i915);
+
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+
+		ok = verify_gt_engine_wa(i915, "after busy reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+	}
+
+err:
+	igt_global_reset_unlock(i915);
+	kernel_context_close(ctx);
+
+	igt_flush_test(i915, I915_WAIT_LOCKED);
+
+	return ret;
+}
+
 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_reset_whitelist),
+		SUBTEST(live_gpu_reset_gt_engine_workarounds),
+		SUBTEST(live_engine_reset_gt_engine_workarounds),
 	};
 	int err;
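
The two added subtests bracket every reset flavour with verify_gt_engine_wa(),
which ANDs the GT-wide check with each engine's so that a single mismatched
register fails the run. For the per-engine case the sequence is, in outline:

	ok = verify_gt_engine_wa(i915, "before reset");

	i915_reset_engine(engine, "live_workarounds");	/* idle reset */
	ok &= verify_gt_engine_wa(i915, "after idle reset");

	/* park a spinner on the engine, then reset while busy */
	i915_reset_engine(engine, "live_workarounds");
	ok &= verify_gt_engine_wa(i915, "after busy reset");
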
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da..d0c44c1 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.submit_request = mock_submit_request;
 
 	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
-	lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+	i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
 
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35..361e962 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
 	.transfer = intel_dsi_host_transfer,
 };
 
-static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
-						  enum port port)
-{
-	struct intel_dsi_host *host;
-	struct mipi_dsi_device *device;
-
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return NULL;
-
-	host->base.ops = &intel_dsi_host_ops;
-	host->intel_dsi = intel_dsi;
-	host->port = port;
-
-	/*
-	 * We should call mipi_dsi_host_register(&host->base) here, but we don't
-	 * have a host->dev, and we don't have OF stuff either. So just use the
-	 * dsi framework as a library and hope for the best. Create the dsi
-	 * devices by ourselves here too. Need to be careful though, because we
-	 * don't initialize any of the driver model devices here.
-	 */
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		kfree(host);
-		return NULL;
-	}
-
-	device->host = &host->base;
-	host->device = device;
-
-	return host;
-}
-
 /*
  * send a video mode command
  *
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
-{
-	return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
-}
-
-static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
-{
-	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 	int ret;
 
 	DRM_DEBUG_KMS("\n");
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	if (fixed_mode) {
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			      const struct intel_crtc_state *pipe_config);
 static void intel_dsi_unprepare(struct intel_encoder *encoder);
 
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
-	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
-	if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
-		return;
-
-	msleep(msec);
-}
-
 /*
  * Panel enable/disable sequences from the VBT spec.
  *
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
  * - wait t4                                           - wait t4
  */
 
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
 static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 }
 
 /*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
-				 const struct intel_crtc_state *pipe_config,
-				 const struct drm_connector_state *conn_state)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
-/*
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
  */
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	}
 }
 
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
-		     struct drm_display_mode *mode)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
-	DRM_DEBUG_KMS("\n");
-
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
-
-	if (fixed_mode) {
-		if (mode->hdisplay > fixed_mode->hdisplay)
-			return MODE_PANEL;
-		if (mode->vdisplay > fixed_mode->vdisplay)
-			return MODE_PANEL;
-		if (fixed_mode->clock > max_dotclk)
-			return MODE_CLOCK_HIGH;
-	}
-
-	return MODE_OK;
-}
-
 /* return txclkesc cycles in terms of divider and duration in us */
 static u16 txclkesc(u32 divider, unsigned int us)
 {
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
 	}
 }
 
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct drm_display_mode *mode;
-
-	DRM_DEBUG_KMS("\n");
-
-	if (!intel_connector->panel.fixed_mode) {
-		DRM_DEBUG_KMS("no fixed mode\n");
-		return 0;
-	}
-
-	mode = drm_mode_duplicate(connector->dev,
-				  intel_connector->panel.fixed_mode);
-	if (!mode) {
-		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
-		return 0;
-	}
-
-	drm_mode_probed_add(connector, mode);
-	return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	DRM_DEBUG_KMS("\n");
-	intel_panel_fini(&intel_connector->panel);
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dsi_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
 
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-	enum i9xx_plane_id i9xx_plane;
+	struct intel_encoder *encoder = connector->encoder;
+	enum intel_display_power_domain power_domain;
+	enum drm_panel_orientation orientation;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
+	enum pipe pipe;
 	u32 val;
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		if (connector->encoder->crtc_mask == BIT(PIPE_B))
-			i9xx_plane = PLANE_B;
-		else
-			i9xx_plane = PLANE_A;
+	if (!encoder->get_hw_state(encoder, &pipe))
+		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
 
-		val = I915_READ(DSPCNTR(i9xx_plane));
-		if (val & DISPPLANE_ROTATE_180)
-			orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-	}
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	plane = to_intel_plane(crtc->base.primary);
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+	val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+	else if (val & DISPPLANE_ROTATE_180)
+		orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+	else
+		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+	intel_display_power_put(dev_priv, power_domain);
 
 	return orientation;
 }
 
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum drm_panel_orientation orientation;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		orientation = vlv_dsi_get_hw_panel_orientation(connector);
+		if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+			return orientation;
+	}
+
+	return intel_dsi_get_panel_orientation(connector);
+}
+
 static void intel_dsi_add_properties(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
 		connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
 		connector->base.display_info.panel_orientation =
-			intel_dsi_get_panel_orientation(connector);
+			vlv_dsi_get_panel_orientation(connector);
 		drm_connector_init_panel_orientation_property(
 				&connector->base,
 				connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
-	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
 	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	for_each_dsi_port(port, intel_dsi->ports) {
 		struct intel_dsi_host *host;
 
-		host = intel_dsi_host_init(intel_dsi, port);
+		host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+					   port);
 		if (!host)
 			goto err;
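
The panel-orientation rework reads live hardware state instead of inferring
the plane from crtc_mask: it resolves the encoder's pipe via get_hw_state(),
takes the pipe power domain so DSPCNTR of a powered-down pipe is never
touched, and only falls back to the VBT when readout is inconclusive:

	orientation = vlv_dsi_get_hw_panel_orientation(connector);
	if (orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		orientation = intel_dsi_get_panel_orientation(connector);
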
 
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index fe6becd..77a26fd 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
  *
  * derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 0e6942f..820c7e3 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Freescale i.MX drm driver
  *
  * Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 #include <linux/component.h>
 #include <linux/device.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3bd0f8a..2c5bbe3 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * i.MX drm driver - LVDS display bridge
  *
  * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd331..293dd57 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * i.MX drm driver - Television Encoder (TVEv2)
  *
  * Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/clk.h>
@@ -442,7 +434,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
 	return 0;
 }
 
-static struct clk_ops clk_tve_di_ops = {
+static const struct clk_ops clk_tve_di_ops = {
 	.round_rate = clk_tve_di_round_rate,
 	.set_rate = clk_tve_di_set_rate,
 	.recalc_rate = clk_tve_di_recalc_rate,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710..058b53c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * i.MX IPUv3 Graphics driver
  *
  * Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 #include <linux/component.h>
 #include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 40605fd..c390924 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * i.MX IPUv3 DP Overlay Planes
  *
  * Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <drm/drmP.h>
@@ -236,9 +228,15 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
 
 void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
 {
+	int ret;
+
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-	ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+	ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+	if (ret == -ETIMEDOUT) {
+		DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+			  ipu_plane->base.base.id);
+	}
 
 	if (ipu_plane->dp && disable_dp_channel)
 		ipu_dp_disable_channel(ipu_plane->dp, false);
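
The ipu_plane_disable() change above starts checking the return value of ipu_idmac_wait_busy() for -ETIMEDOUT, logging the timeout but still continuing the teardown. A self-contained sketch of that wait-then-log-then-proceed pattern, with invented names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool channel_busy;	/* stands in for the IDMAC busy bit */

static int wait_idle(int timeout_iterations)
{
	while (channel_busy) {
		if (timeout_iterations-- <= 0)
			return -ETIMEDOUT;
		/* real code would sleep and poll a register here */
	}
	return 0;
}

static void plane_disable(void)
{
	int ret = wait_idle(50);

	if (ret == -ETIMEDOUT)
		fprintf(stderr, "IDMAC timeout, disabling anyway\n");

	/* teardown continues regardless, as in the hunk above */
	printf("channel disabled\n");
}

int main(void)
{
	channel_busy = false;
	plane_disable();
	return 0;
}
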
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aefd04e..f3ce511 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * i.MX drm driver - parallel display implementation
  *
  * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/component.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 3ce51d8..c28b69f 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,6 +7,7 @@
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
 	select REGMAP_MMIO
+	select MESON_CANVAS
 
 config DRM_MESON_DW_HDMI
 	tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c5c4cc3..7709f2f 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
 meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
 
 obj-$(CONFIG_DRM_MESON) += meson-drm.o
 obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 08f6073..5de11aa 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -39,6 +39,7 @@
 #define CANVAS_WIDTH_HBIT       0
 #define CANVAS_HEIGHT_BIT       9
 #define CANVAS_BLKMODE_BIT      24
+#define CANVAS_ENDIAN_BIT	26
 #define DMC_CAV_LUT_ADDR	0x50 /* 0x14 offset in data sheet */
 #define CANVAS_LUT_WR_EN        (0x2 << 8)
 #define CANVAS_LUT_RD_EN        (0x1 << 8)
@@ -47,7 +48,8 @@ void meson_canvas_setup(struct meson_drm *priv,
 			uint32_t canvas_index, uint32_t addr,
 			uint32_t stride, uint32_t height,
 			unsigned int wrap,
-			unsigned int blkmode)
+			unsigned int blkmode,
+			unsigned int endian)
 {
 	unsigned int val;
 
@@ -60,7 +62,8 @@ void meson_canvas_setup(struct meson_drm *priv,
 						CANVAS_WIDTH_HBIT) |
 		(height << CANVAS_HEIGHT_BIT) |
 		(wrap << 22) |
-		(blkmode << CANVAS_BLKMODE_BIT));
+		(blkmode << CANVAS_BLKMODE_BIT) |
+		(endian << CANVAS_ENDIAN_BIT));
 
 	regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
 			CANVAS_LUT_WR_EN | canvas_index);
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
index af1759d..85dbf26 100644
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -23,6 +23,9 @@
 #define __MESON_CANVAS_H
 
 #define MESON_CANVAS_ID_OSD1	0x4e
+#define MESON_CANVAS_ID_VD1_0	0x60
+#define MESON_CANVAS_ID_VD1_1	0x61
+#define MESON_CANVAS_ID_VD1_2	0x62
 
 /* Canvas configuration. */
 #define MESON_CANVAS_WRAP_NONE	0x00
@@ -33,10 +36,16 @@
 #define	MESON_CANVAS_BLKMODE_32x32	0x01
 #define	MESON_CANVAS_BLKMODE_64x64	0x02
 
+#define MESON_CANVAS_ENDIAN_SWAP16	0x1
+#define MESON_CANVAS_ENDIAN_SWAP32	0x3
+#define MESON_CANVAS_ENDIAN_SWAP64	0x7
+#define MESON_CANVAS_ENDIAN_SWAP128	0xf
+
 void meson_canvas_setup(struct meson_drm *priv,
 			uint32_t canvas_index, uint32_t addr,
 			uint32_t stride, uint32_t height,
 			unsigned int wrap,
-			unsigned int blkmode);
+			unsigned int blkmode,
+			unsigned int endian);
 
 #endif /* __MESON_CANVAS_H */
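
From the new CANVAS_ENDIAN_BIT define and the extended meson_canvas_setup() above, the canvas LUT data word packs height, wrap, block mode, and now an endian-swap code into one 32-bit value. A userspace sketch of that packing; the wrap position matches the literal shift in the diff, but the field widths and the CANVAS_WRAP_BIT name are assumptions:

#include <stdint.h>
#include <stdio.h>

#define CANVAS_HEIGHT_BIT	9
#define CANVAS_WRAP_BIT		22
#define CANVAS_BLKMODE_BIT	24
#define CANVAS_ENDIAN_BIT	26

static uint32_t canvas_word(uint32_t height, uint32_t wrap,
			    uint32_t blkmode, uint32_t endian)
{
	return (height << CANVAS_HEIGHT_BIT) |
	       (wrap << CANVAS_WRAP_BIT) |
	       (blkmode << CANVAS_BLKMODE_BIT) |
	       (endian << CANVAS_ENDIAN_BIT);
}

int main(void)
{
	/* 1080 lines, no wrap, linear block mode, 64-bit endian swap */
	printf("0x%08x\n", canvas_word(1080, 0, 0, 0x7));
	return 0;
}
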
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 191b314..75d97f1 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
+#include <linux/bitfield.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -98,6 +99,10 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
 	writel(crtc_state->mode.hdisplay,
 	       priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
 
+	/* VD1 Preblend vertical start/end */
+	writel(FIELD_PREP(GENMASK(11, 0), 2303),
+			priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
 	writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
 			    priv->io_base + _REG(VPP_MISC));
 
@@ -126,13 +131,19 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct meson_drm *priv = meson_crtc->priv;
 
+	DRM_DEBUG_DRIVER("\n");
+
 	drm_crtc_vblank_off(crtc);
 
 	priv->viu.osd1_enabled = false;
 	priv->viu.osd1_commit = false;
 
+	priv->viu.vd1_enabled = false;
+	priv->viu.vd1_commit = false;
+
 	/* Disable VPP Postblend */
-	writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+	writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
+			    VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
 			    priv->io_base + _REG(VPP_MISC));
 
 	if (crtc->state->event && !crtc->state->active) {
@@ -172,6 +183,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct meson_drm *priv = meson_crtc->priv;
 
 	priv->viu.osd1_commit = true;
+	priv->viu.vd1_commit = true;
 }
 
 static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
@@ -200,26 +212,37 @@ void meson_crtc_irq(struct meson_drm *priv)
 				priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
 		writel_relaxed(priv->viu.osd1_blk0_cfg[4],
 				priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+		writel_relaxed(priv->viu.osd_sc_ctrl0,
+				priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+		writel_relaxed(priv->viu.osd_sc_i_wh_m1,
+				priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+		writel_relaxed(priv->viu.osd_sc_o_h_start_end,
+				priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+		writel_relaxed(priv->viu.osd_sc_o_v_start_end,
+				priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+		writel_relaxed(priv->viu.osd_sc_v_ini_phase,
+				priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+		writel_relaxed(priv->viu.osd_sc_v_phase_step,
+				priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
+		writel_relaxed(priv->viu.osd_sc_h_ini_phase,
+				priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
+		writel_relaxed(priv->viu.osd_sc_h_phase_step,
+				priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
+		writel_relaxed(priv->viu.osd_sc_h_ctrl0,
+				priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+		writel_relaxed(priv->viu.osd_sc_v_ctrl0,
+				priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
 
-		/* If output is interlace, make use of the Scaler */
-		if (priv->viu.osd1_interlace) {
-			struct drm_plane *plane = priv->primary_plane;
-			struct drm_plane_state *state = plane->state;
-			struct drm_rect dest = {
-				.x1 = state->crtc_x,
-				.y1 = state->crtc_y,
-				.x2 = state->crtc_x + state->crtc_w,
-				.y2 = state->crtc_y + state->crtc_h,
-			};
-
-			meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
-		} else
-			meson_vpp_disable_interlace_vscaler_osd1(priv);
-
-		meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
-			   priv->viu.osd1_addr, priv->viu.osd1_stride,
-			   priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
-			   MESON_CANVAS_BLKMODE_LINEAR);
+		if (priv->canvas)
+			meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+				priv->viu.osd1_addr, priv->viu.osd1_stride,
+				priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+				MESON_CANVAS_BLKMODE_LINEAR, 0);
+		else
+			meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+				priv->viu.osd1_addr, priv->viu.osd1_stride,
+				priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+				MESON_CANVAS_BLKMODE_LINEAR, 0);
 
 		/* Enable OSD1 */
 		writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
@@ -228,6 +251,206 @@ void meson_crtc_irq(struct meson_drm *priv)
 		priv->viu.osd1_commit = false;
 	}
 
+	/* Update the VD1 registers */
+	if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
+
+		switch (priv->viu.vd1_planes) {
+		case 3:
+			if (priv->canvas)
+				meson_canvas_config(priv->canvas,
+						    priv->canvas_id_vd1_2,
+						    priv->viu.vd1_addr2,
+						    priv->viu.vd1_stride2,
+						    priv->viu.vd1_height2,
+						    MESON_CANVAS_WRAP_NONE,
+						    MESON_CANVAS_BLKMODE_LINEAR,
+						    MESON_CANVAS_ENDIAN_SWAP64);
+			else
+				meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
+						   priv->viu.vd1_addr2,
+						   priv->viu.vd1_stride2,
+						   priv->viu.vd1_height2,
+						   MESON_CANVAS_WRAP_NONE,
+						   MESON_CANVAS_BLKMODE_LINEAR,
+						   MESON_CANVAS_ENDIAN_SWAP64);
+		/* fallthrough */
+		case 2:
+			if (priv->canvas)
+				meson_canvas_config(priv->canvas,
+						    priv->canvas_id_vd1_1,
+						    priv->viu.vd1_addr1,
+						    priv->viu.vd1_stride1,
+						    priv->viu.vd1_height1,
+						    MESON_CANVAS_WRAP_NONE,
+						    MESON_CANVAS_BLKMODE_LINEAR,
+						    MESON_CANVAS_ENDIAN_SWAP64);
+			else
+				meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
+						   priv->viu.vd1_addr1,
+						   priv->viu.vd1_stride1,
+						   priv->viu.vd1_height1,
+						   MESON_CANVAS_WRAP_NONE,
+						   MESON_CANVAS_BLKMODE_LINEAR,
+						   MESON_CANVAS_ENDIAN_SWAP64);
+		/* fallthrough */
+		case 1:
+			if (priv->canvas)
+				meson_canvas_config(priv->canvas,
+						    priv->canvas_id_vd1_0,
+						    priv->viu.vd1_addr0,
+						    priv->viu.vd1_stride0,
+						    priv->viu.vd1_height0,
+						    MESON_CANVAS_WRAP_NONE,
+						    MESON_CANVAS_BLKMODE_LINEAR,
+						    MESON_CANVAS_ENDIAN_SWAP64);
+			else
+				meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
+						   priv->viu.vd1_addr0,
+						   priv->viu.vd1_stride0,
+						   priv->viu.vd1_height0,
+						   MESON_CANVAS_WRAP_NONE,
+						   MESON_CANVAS_BLKMODE_LINEAR,
+						   MESON_CANVAS_ENDIAN_SWAP64);
+	}
+
+		writel_relaxed(priv->viu.vd1_if0_gen_reg,
+				priv->io_base + _REG(VD1_IF0_GEN_REG));
+		writel_relaxed(priv->viu.vd1_if0_gen_reg,
+				priv->io_base + _REG(VD2_IF0_GEN_REG));
+		writel_relaxed(priv->viu.vd1_if0_gen_reg2,
+				priv->io_base + _REG(VD1_IF0_GEN_REG2));
+		writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+				priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+		writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+				priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+		writel_relaxed(priv->viu.viu_vd1_fmt_w,
+				priv->io_base + _REG(VIU_VD1_FMT_W));
+		writel_relaxed(priv->viu.viu_vd1_fmt_w,
+				priv->io_base + _REG(VIU_VD2_FMT_W));
+		writel_relaxed(priv->viu.vd1_if0_canvas0,
+				priv->io_base + _REG(VD1_IF0_CANVAS0));
+		writel_relaxed(priv->viu.vd1_if0_canvas0,
+				priv->io_base + _REG(VD1_IF0_CANVAS1));
+		writel_relaxed(priv->viu.vd1_if0_canvas0,
+				priv->io_base + _REG(VD2_IF0_CANVAS0));
+		writel_relaxed(priv->viu.vd1_if0_canvas0,
+				priv->io_base + _REG(VD2_IF0_CANVAS1));
+		writel_relaxed(priv->viu.vd1_if0_luma_x0,
+				priv->io_base + _REG(VD1_IF0_LUMA_X0));
+		writel_relaxed(priv->viu.vd1_if0_luma_x0,
+				priv->io_base + _REG(VD1_IF0_LUMA_X1));
+		writel_relaxed(priv->viu.vd1_if0_luma_x0,
+				priv->io_base + _REG(VD2_IF0_LUMA_X0));
+		writel_relaxed(priv->viu.vd1_if0_luma_x0,
+				priv->io_base + _REG(VD2_IF0_LUMA_X1));
+		writel_relaxed(priv->viu.vd1_if0_luma_y0,
+				priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+		writel_relaxed(priv->viu.vd1_if0_luma_y0,
+				priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+		writel_relaxed(priv->viu.vd1_if0_luma_y0,
+				priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+		writel_relaxed(priv->viu.vd1_if0_luma_y0,
+				priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+		writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+				priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+		writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+				priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+		writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+				priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+		writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+				priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+		writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+				priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+		writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+				priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+		writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+				priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+		writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+				priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+		writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+				priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+		writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+				priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+		writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+				priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+				priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+				priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+				priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+				priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+				priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+				priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+		writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+				priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
+		writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
+		writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
+		writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
+		writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+		writel_relaxed(priv->viu.vd1_range_map_y,
+				priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+		writel_relaxed(priv->viu.vd1_range_map_cb,
+				priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+		writel_relaxed(priv->viu.vd1_range_map_cr,
+				priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+		writel_relaxed(0x78404,
+				priv->io_base + _REG(VPP_SC_MISC));
+		writel_relaxed(priv->viu.vpp_pic_in_height,
+				priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
+		writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
+			priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
+		writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
+			priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+		writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
+			priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
+		writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
+			priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
+		writel_relaxed(priv->viu.vpp_hsc_region12_startp,
+				priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
+		writel_relaxed(priv->viu.vpp_hsc_region34_startp,
+				priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
+		writel_relaxed(priv->viu.vpp_hsc_region4_endp,
+				priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
+		writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
+				priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
+		writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
+			priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
+		writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
+			priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
+		writel_relaxed(priv->viu.vpp_line_in_length,
+				priv->io_base + _REG(VPP_LINE_IN_LENGTH));
+		writel_relaxed(priv->viu.vpp_preblend_h_size,
+				priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
+		writel_relaxed(priv->viu.vpp_vsc_region12_startp,
+				priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
+		writel_relaxed(priv->viu.vpp_vsc_region34_startp,
+				priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
+		writel_relaxed(priv->viu.vpp_vsc_region4_endp,
+				priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
+		writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
+				priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
+		writel_relaxed(priv->viu.vpp_vsc_ini_phase,
+				priv->io_base + _REG(VPP_VSC_INI_PHASE));
+		writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
+				priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
+		writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
+				priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
+		writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+
+		/* Enable VD1 */
+		writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+				    VPP_COLOR_MNG_ENABLE,
+				    VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+				    VPP_COLOR_MNG_ENABLE,
+				    priv->io_base + _REG(VPP_MISC));
+
+		priv->viu.vd1_commit = false;
+	}
+
 	drm_crtc_handle_vblank(priv->crtc);
 
 	spin_lock_irqsave(&priv->drm->event_lock, flags);
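
The VD1 update above leans on deliberate switch fallthrough: entering at the framebuffer's plane count, each case programs one canvas and falls through to the next, so a three-plane format configures planes 2, 1 and 0 in turn. A compact model of that control flow:

#include <stdio.h>

static void config_canvas(int plane)
{
	printf("configure canvas for plane %d\n", plane);
}

static void update_planes(unsigned int num_planes)
{
	switch (num_planes) {
	case 3:
		config_canvas(2);
		/* fallthrough */
	case 2:
		config_canvas(1);
		/* fallthrough */
	case 1:
		config_canvas(0);
	}
}

int main(void)
{
	update_planes(3);	/* e.g. a three-plane YUV420 framebuffer */
	return 0;
}
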
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index d344312..3ee4d4a4e 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -41,6 +41,7 @@
 
 #include "meson_drv.h"
 #include "meson_plane.h"
+#include "meson_overlay.h"
 #include "meson_crtc.h"
 #include "meson_venc_cvbs.h"
 
@@ -68,15 +69,7 @@
  * - Powering Up HDMI controller and PHY
  */
 
-static void meson_fb_output_poll_changed(struct drm_device *dev)
-{
-	struct meson_drm *priv = dev->dev_private;
-
-	drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
 static const struct drm_mode_config_funcs meson_mode_config_funcs = {
-	.output_poll_changed = meson_fb_output_poll_changed,
 	.atomic_check        = drm_atomic_helper_check,
 	.atomic_commit       = drm_atomic_helper_commit,
 	.fb_create           = drm_gem_fb_create,
@@ -216,24 +209,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 		goto free_drm;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
-	if (!res) {
-		ret = -EINVAL;
-		goto free_drm;
-	}
-	/* Simply ioremap since it may be a shared register zone */
-	regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!regs) {
-		ret = -EADDRNOTAVAIL;
-		goto free_drm;
-	}
+	priv->canvas = meson_canvas_get(dev);
+	if (!IS_ERR(priv->canvas)) {
+		ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+		if (ret)
+			goto free_drm;
+		ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+		if (ret) {
+			meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+			goto free_drm;
+		}
+		ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+		if (ret) {
+			meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+			meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+			goto free_drm;
+		}
+		ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+		if (ret) {
+			meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+			meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+			meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+			goto free_drm;
+		}
+	} else {
+		priv->canvas = NULL;
 
-	priv->dmc = devm_regmap_init_mmio(dev, regs,
-					  &meson_regmap_config);
-	if (IS_ERR(priv->dmc)) {
-		dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
-		ret = PTR_ERR(priv->dmc);
-		goto free_drm;
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+		if (!res) {
+			ret = -EINVAL;
+			goto free_drm;
+		}
+		/* Simply ioremap since it may be a shared register zone */
+		regs = devm_ioremap(dev, res->start, resource_size(res));
+		if (!regs) {
+			ret = -EADDRNOTAVAIL;
+			goto free_drm;
+		}
+
+		priv->dmc = devm_regmap_init_mmio(dev, regs,
+						  &meson_regmap_config);
+		if (IS_ERR(priv->dmc)) {
+			dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+			ret = PTR_ERR(priv->dmc);
+			goto free_drm;
+		}
 	}
 
 	priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -272,6 +292,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	if (ret)
 		goto free_drm;
 
+	ret = meson_overlay_create(priv);
+	if (ret)
+		goto free_drm;
+
 	ret = meson_crtc_create(priv);
 	if (ret)
 		goto free_drm;
@@ -282,13 +306,6 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 	drm_mode_config_reset(drm);
 
-	priv->fbdev = drm_fbdev_cma_init(drm, 32,
-					 drm->mode_config.num_connector);
-	if (IS_ERR(priv->fbdev)) {
-		ret = PTR_ERR(priv->fbdev);
-		goto free_drm;
-	}
-
 	drm_kms_helper_poll_init(drm);
 
 	platform_set_drvdata(pdev, priv);
@@ -297,6 +314,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	if (ret)
 		goto free_drm;
 
+	drm_fbdev_generic_setup(drm, 32);
+
 	return 0;
 
 free_drm:
@@ -315,9 +334,15 @@ static void meson_drv_unbind(struct device *dev)
 	struct drm_device *drm = dev_get_drvdata(dev);
 	struct meson_drm *priv = drm->dev_private;
 
+	if (priv->canvas) {
+		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+	}
+
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
-	drm_fbdev_cma_fini(priv->fbdev);
 	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
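
The bind path above unwinds a partial canvas allocation by repeating the meson_canvas_free() calls in every error branch. A more conventional shape for the same unwind is a goto ladder; this sketch uses invented canvas_alloc()/canvas_free() stubs so it runs standalone:

#include <stdio.h>

struct ctx { int osd1, vd1_0, vd1_1, vd1_2; };

static int fail_at = -1;	/* inject a failure for demonstration */
static int next_id;

static int canvas_alloc(int *id)
{
	if (next_id == fail_at)
		return -1;
	*id = next_id++;
	return 0;
}

static void canvas_free(int id)
{
	printf("freed canvas %d\n", id);
}

static int alloc_canvases(struct ctx *c)
{
	int ret;

	ret = canvas_alloc(&c->osd1);
	if (ret)
		return ret;
	ret = canvas_alloc(&c->vd1_0);
	if (ret)
		goto free_osd1;
	ret = canvas_alloc(&c->vd1_1);
	if (ret)
		goto free_vd1_0;
	ret = canvas_alloc(&c->vd1_2);
	if (ret)
		goto free_vd1_1;
	return 0;

free_vd1_1:
	canvas_free(c->vd1_1);
free_vd1_0:
	canvas_free(c->vd1_0);
free_osd1:
	canvas_free(c->osd1);
	return ret;
}

int main(void)
{
	struct ctx c;

	fail_at = 3;		/* fourth allocation fails */
	if (alloc_canvases(&c))
		fprintf(stderr, "allocation failed, unwound cleanly\n");
	return 0;
}
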
 
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 8450d6ac..4dccf4c 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/of.h>
+#include <linux/soc/amlogic/meson-canvas.h>
 #include <drm/drmP.h>
 
 struct meson_drm {
@@ -31,10 +32,16 @@ struct meson_drm {
 	struct regmap *dmc;
 	int vsync_irq;
 
+	struct meson_canvas *canvas;
+	u8 canvas_id_osd1;
+	u8 canvas_id_vd1_0;
+	u8 canvas_id_vd1_1;
+	u8 canvas_id_vd1_2;
+
 	struct drm_device *drm;
 	struct drm_crtc *crtc;
-	struct drm_fbdev_cma *fbdev;
 	struct drm_plane *primary_plane;
+	struct drm_plane *overlay_plane;
 
 	/* Components Data */
 	struct {
@@ -46,6 +53,64 @@ struct meson_drm {
 		uint32_t osd1_addr;
 		uint32_t osd1_stride;
 		uint32_t osd1_height;
+		uint32_t osd_sc_ctrl0;
+		uint32_t osd_sc_i_wh_m1;
+		uint32_t osd_sc_o_h_start_end;
+		uint32_t osd_sc_o_v_start_end;
+		uint32_t osd_sc_v_ini_phase;
+		uint32_t osd_sc_v_phase_step;
+		uint32_t osd_sc_h_ini_phase;
+		uint32_t osd_sc_h_phase_step;
+		uint32_t osd_sc_h_ctrl0;
+		uint32_t osd_sc_v_ctrl0;
+
+		bool vd1_enabled;
+		bool vd1_commit;
+		unsigned int vd1_planes;
+		uint32_t vd1_if0_gen_reg;
+		uint32_t vd1_if0_luma_x0;
+		uint32_t vd1_if0_luma_y0;
+		uint32_t vd1_if0_chroma_x0;
+		uint32_t vd1_if0_chroma_y0;
+		uint32_t vd1_if0_repeat_loop;
+		uint32_t vd1_if0_luma0_rpt_pat;
+		uint32_t vd1_if0_chroma0_rpt_pat;
+		uint32_t vd1_range_map_y;
+		uint32_t vd1_range_map_cb;
+		uint32_t vd1_range_map_cr;
+		uint32_t viu_vd1_fmt_w;
+		uint32_t vd1_if0_canvas0;
+		uint32_t vd1_if0_gen_reg2;
+		uint32_t viu_vd1_fmt_ctrl;
+		uint32_t vd1_addr0;
+		uint32_t vd1_addr1;
+		uint32_t vd1_addr2;
+		uint32_t vd1_stride0;
+		uint32_t vd1_stride1;
+		uint32_t vd1_stride2;
+		uint32_t vd1_height0;
+		uint32_t vd1_height1;
+		uint32_t vd1_height2;
+		uint32_t vpp_pic_in_height;
+		uint32_t vpp_postblend_vd1_h_start_end;
+		uint32_t vpp_postblend_vd1_v_start_end;
+		uint32_t vpp_hsc_region12_startp;
+		uint32_t vpp_hsc_region34_startp;
+		uint32_t vpp_hsc_region4_endp;
+		uint32_t vpp_hsc_start_phase_step;
+		uint32_t vpp_hsc_region1_phase_slope;
+		uint32_t vpp_hsc_region3_phase_slope;
+		uint32_t vpp_line_in_length;
+		uint32_t vpp_preblend_h_size;
+		uint32_t vpp_vsc_region12_startp;
+		uint32_t vpp_vsc_region34_startp;
+		uint32_t vpp_vsc_region4_endp;
+		uint32_t vpp_vsc_start_phase_step;
+		uint32_t vpp_vsc_ini_phase;
+		uint32_t vpp_vsc_phase_ctrl;
+		uint32_t vpp_hsc_phase_ctrl;
+		uint32_t vpp_blend_vd2_h_start_end;
+		uint32_t vpp_blend_vd2_v_start_end;
 	} viu;
 
 	struct {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 2cb2ad2..807111e 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -594,17 +594,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
 		vclk_freq, venc_freq, hdmi_freq);
 
-	/* Finally filter by configurable vclk frequencies for VIC modes */
-	switch (vclk_freq) {
-	case 54000:
-	case 74250:
-	case 148500:
-	case 297000:
-	case 594000:
-		return MODE_OK;
-	}
-
-	return MODE_CLOCK_RANGE;
+	return meson_vclk_vic_supported_freq(vclk_freq);
 }
 
 /* Encoder */
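
dw_hdmi_mode_valid() now defers to meson_vclk_vic_supported_freq(), which walks the sentinel-terminated params[] table (extended later in this series) instead of a hard-coded switch; the kernel version also matches the 1000/1001 rate variants, omitted here. A userspace model of the table-driven filter, using the frequencies from the removed switch:

#include <stdbool.h>
#include <stdio.h>

static const unsigned int vic_freqs[] = {
	54000, 74250, 148500, 297000, 594000,
	0 /* sentinel */
};

static bool vic_freq_supported(unsigned int freq)
{
	int i;

	for (i = 0; vic_freqs[i]; i++)
		if (vic_freqs[i] == freq)
			return true;
	return false;
}

int main(void)
{
	printf("148500: %s\n", vic_freq_supported(148500) ? "OK" : "out of range");
	printf("100000: %s\n", vic_freq_supported(100000) ? "OK" : "out of range");
	return 0;
}
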
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
new file mode 100644
index 0000000..691a9fd
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_overlay.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/* VD1_IF0_GEN_REG */
+#define VD_URGENT_CHROMA		BIT(28)
+#define VD_URGENT_LUMA			BIT(27)
+#define VD_HOLD_LINES(lines)		FIELD_PREP(GENMASK(24, 19), lines)
+#define VD_DEMUX_MODE_RGB		BIT(16)
+#define VD_BYTES_PER_PIXEL(val)		FIELD_PREP(GENMASK(15, 14), val)
+#define VD_CHRO_RPT_LASTL_CTRL		BIT(6)
+#define VD_LITTLE_ENDIAN		BIT(4)
+#define VD_SEPARATE_EN			BIT(1)
+#define VD_ENABLE			BIT(0)
+
+/* VD1_IF0_CANVAS0 */
+#define CANVAS_ADDR2(addr)		FIELD_PREP(GENMASK(23, 16), addr)
+#define CANVAS_ADDR1(addr)		FIELD_PREP(GENMASK(15, 8), addr)
+#define CANVAS_ADDR0(addr)		FIELD_PREP(GENMASK(7, 0), addr)
+
+/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
+#define VD_X_START(value)		FIELD_PREP(GENMASK(14, 0), value)
+#define VD_X_END(value)			FIELD_PREP(GENMASK(30, 16), value)
+
+/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
+#define VD_Y_START(value)		FIELD_PREP(GENMASK(12, 0), value)
+#define VD_Y_END(value)			FIELD_PREP(GENMASK(28, 16), value)
+
+/* VD1_IF0_GEN_REG2 */
+#define VD_COLOR_MAP(value)		FIELD_PREP(GENMASK(1, 0), value)
+
+/* VIU_VD1_FMT_CTRL */
+#define VD_HORZ_Y_C_RATIO(value)	FIELD_PREP(GENMASK(22, 21), value)
+#define VD_HORZ_FMT_EN			BIT(20)
+#define VD_VERT_RPT_LINE0		BIT(16)
+#define VD_VERT_INITIAL_PHASE(value)	FIELD_PREP(GENMASK(11, 8), value)
+#define VD_VERT_PHASE_STEP(value)	FIELD_PREP(GENMASK(7, 1), value)
+#define VD_VERT_FMT_EN			BIT(0)
+
+/* VPP_POSTBLEND_VD1_H_START_END */
+#define VD_H_END(value)			FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_START(value)		FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_POSTBLEND_VD1_V_START_END */
+#define VD_V_END(value)			FIELD_PREP(GENMASK(11, 0), value)
+#define VD_V_START(value)		FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_BLEND_VD2_V_START_END */
+#define VD2_V_END(value)		FIELD_PREP(GENMASK(11, 0), value)
+#define VD2_V_START(value)		FIELD_PREP(GENMASK(27, 16), value)
+
+/* VIU_VD1_FMT_W */
+#define VD_V_WIDTH(value)		FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_WIDTH(value)		FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
+#define VD_REGION24_START(value)	FIELD_PREP(GENMASK(11, 0), value)
+#define VD_REGION13_END(value)		FIELD_PREP(GENMASK(27, 16), value)
+
+struct meson_overlay {
+	struct drm_plane base;
+	struct meson_drm *priv;
+};
+#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+
+static int meson_overlay_atomic_check(struct drm_plane *plane,
+				      struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+
+	if (!state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	return drm_atomic_helper_check_plane_state(state, crtc_state,
+						   FRAC_16_16(1, 5),
+						   FRAC_16_16(5, 1),
+						   true, true);
+}
+
+/* Takes a fixed 16.16 number and converts it to integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+	return value >> 16;
+}
+
+static const uint8_t skip_tab[6] = {
+	0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
+};
+
+static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
+					     int *repeat, bool interlace)
+{
+	int offset_in = 0;
+	int offset_out = 0;
+	int repeat_skip = 0;
+
+	if (!interlace && ratio_y > (1 << 18))
+		offset_out = (1 * ratio_y) >> 10;
+
+	while ((offset_in + (4 << 8)) <= offset_out) {
+		repeat_skip++;
+		offset_in += 4 << 8;
+	}
+
+	*phase = (offset_out - offset_in) >> 2;
+
+	if (*phase > 0x100)
+		repeat_skip++;
+
+	*phase = *phase & 0xff;
+
+	if (repeat_skip > 5)
+		repeat_skip = 5;
+
+	*repeat = skip_tab[repeat_skip];
+}
+
+static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
+					      struct drm_plane *plane,
+					      bool interlace_mode)
+{
+	struct drm_crtc_state *crtc_state = priv->crtc->state;
+	int video_top, video_left, video_width, video_height;
+	struct drm_plane_state *state = plane->state;
+	unsigned int vd_start_lines, vd_end_lines;
+	unsigned int hd_start_lines, hd_end_lines;
+	unsigned int crtc_height, crtc_width;
+	unsigned int vsc_startp, vsc_endp;
+	unsigned int hsc_startp, hsc_endp;
+	unsigned int crop_top, crop_left;
+	int vphase, vphase_repeat_skip;
+	unsigned int ratio_x, ratio_y;
+	int temp_height, temp_width;
+	unsigned int w_in, h_in;
+	int temp, start, end;
+
+	if (!crtc_state) {
+		DRM_ERROR("Invalid crtc_state\n");
+		return;
+	}
+
+	crtc_height = crtc_state->mode.vdisplay;
+	crtc_width = crtc_state->mode.hdisplay;
+
+	w_in = fixed16_to_int(state->src_w);
+	h_in = fixed16_to_int(state->src_h);
+	crop_top = fixed16_to_int(state->src_x);
+	crop_left = fixed16_to_int(state->src_x);
+
+	video_top = state->crtc_y;
+	video_left = state->crtc_x;
+	video_width = state->crtc_w;
+	video_height = state->crtc_h;
+
+	DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
+		  crtc_width, crtc_height, interlace_mode);
+	DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
+		  w_in, h_in, crop_top, crop_left);
+	DRM_DEBUG("video top %d left %d width %d height %d\n",
+		  video_top, video_left, video_width, video_height);
+
+	ratio_x = (w_in << 18) / video_width;
+	ratio_y = (h_in << 18) / video_height;
+
+	if (ratio_x * video_width < (w_in << 18))
+		ratio_x++;
+
+	DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
+
+	meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
+					 interlace_mode);
+
+	DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
+
+	/* Vertical */
+
+	start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
+	end = (h_in << 18) / ratio_y + start - 1;
+
+	if (video_top < 0 && start < 0)
+		vd_start_lines = (-(start) * ratio_y) >> 18;
+	else if (start < video_top)
+		vd_start_lines = ((video_top - start) * ratio_y) >> 18;
+	else
+		vd_start_lines = 0;
+
+	if (video_top < 0)
+		temp_height = min_t(unsigned int,
+				    video_top + video_height - 1,
+				    crtc_height - 1);
+	else
+		temp_height = min_t(unsigned int,
+				    video_top + video_height - 1,
+				    crtc_height - 1) - video_top + 1;
+
+	temp = vd_start_lines + (temp_height * ratio_y >> 18);
+	vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
+
+	vd_start_lines += crop_left;
+	vd_end_lines += crop_left;
+
+	/*
+	 * TOFIX: Input frames are handled and scaled like progressive frames;
+	 * proper handling of interlaced field input frames needs to be
+	 * figured out using the proper framebuffer flags set by userspace.
+	 */
+	if (interlace_mode) {
+		start >>= 1;
+		end >>= 1;
+	}
+
+	vsc_startp = max_t(int, start,
+			   max_t(int, 0, video_top));
+	vsc_endp = min_t(int, end,
+			 min_t(int, crtc_height - 1,
+			       video_top + video_height - 1));
+
+	DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
+		 vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
+
+	/* Horizontal */
+
+	start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
+	end = (w_in << 18) / ratio_x + start - 1;
+
+	if (video_left < 0 && start < 0)
+		hd_start_lines = (-(start) * ratio_x) >> 18;
+	else if (start < video_left)
+		hd_start_lines = ((video_left - start) * ratio_x) >> 18;
+	else
+		hd_start_lines = 0;
+
+	if (video_left < 0)
+		temp_width = min_t(unsigned int,
+				   video_left + video_width - 1,
+				   crtc_width - 1);
+	else
+		temp_width = min_t(unsigned int,
+				   video_left + video_width - 1,
+				   crtc_width - 1) - video_left + 1;
+
+	temp = hd_start_lines + (temp_width * ratio_x >> 18);
+	hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
+
+	priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+	hsc_startp = max_t(int, start, max_t(int, 0, video_left));
+	hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
+					 video_left + video_width - 1));
+
+	hd_start_lines += crop_top;
+	hd_end_lines += crop_top;
+
+	DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
+		 hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
+
+	priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+
+	priv->viu.vpp_vsc_ini_phase = vphase << 8;
+	priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
+				       vphase_repeat_skip;
+
+	priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
+				    VD_X_END(hd_end_lines);
+	priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
+				      VD_X_END(hd_end_lines >> 1);
+
+	priv->viu.viu_vd1_fmt_w =
+			VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
+			VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
+
+	priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
+				    VD_Y_END(vd_end_lines);
+
+	priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
+				      VD_Y_END(vd_end_lines >> 1);
+
+	priv->viu.vpp_pic_in_height = h_in;
+
+	priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
+						  VD_H_END(hsc_endp);
+	priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
+					      VD_H_END(hd_end_lines);
+	priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
+					    VD_REGION24_START(hsc_startp);
+	priv->viu.vpp_hsc_region34_startp =
+				VD_REGION13_END(hsc_startp) |
+				VD_REGION24_START(hsc_endp - hsc_startp);
+	priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
+	priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
+	priv->viu.vpp_hsc_region1_phase_slope = 0;
+	priv->viu.vpp_hsc_region3_phase_slope = 0;
+	priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
+
+	priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+	priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
+
+	priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
+						  VD_V_END(vsc_endp);
+	priv->viu.vpp_blend_vd2_v_start_end =
+				VD2_V_START((vd_end_lines + 1) >> 1) |
+				VD2_V_END(vd_end_lines);
+
+	priv->viu.vpp_vsc_region12_startp = 0;
+	priv->viu.vpp_vsc_region34_startp =
+				VD_REGION13_END(vsc_endp - vsc_startp) |
+				VD_REGION24_START(vsc_endp - vsc_startp);
+	priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
+	priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+}
+
+static void meson_overlay_atomic_update(struct drm_plane *plane,
+					struct drm_plane_state *old_state)
+{
+	struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = state->fb;
+	struct meson_drm *priv = meson_overlay->priv;
+	struct drm_gem_cma_object *gem;
+	unsigned long flags;
+	bool interlace_mode;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* Fallback if the canvas provider is not available */
+	if (!priv->canvas) {
+		priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
+		priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
+		priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
+	}
+
+	interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+	spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+	priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
+				    VD_URGENT_LUMA |
+				    VD_HOLD_LINES(9) |
+				    VD_CHRO_RPT_LASTL_CTRL |
+				    VD_ENABLE;
+
+	/* Setup scaler params */
+	meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
+
+	priv->viu.vd1_if0_repeat_loop = 0;
+	priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
+	priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
+	priv->viu.vd1_range_map_y = 0;
+	priv->viu.vd1_range_map_cb = 0;
+	priv->viu.vd1_range_map_cr = 0;
+
+	/* Default values for RGB888/YUV444 */
+	priv->viu.vd1_if0_gen_reg2 = 0;
+	priv->viu.viu_vd1_fmt_ctrl = 0;
+
+	switch (fb->format->format) {
+	/* TOFIX DRM_FORMAT_RGB888 should be supported */
+	case DRM_FORMAT_YUYV:
+		priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
+		priv->viu.vd1_if0_canvas0 =
+					CANVAS_ADDR2(priv->canvas_id_vd1_0) |
+					CANVAS_ADDR1(priv->canvas_id_vd1_0) |
+					CANVAS_ADDR0(priv->canvas_id_vd1_0);
+		priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+					     VD_HORZ_FMT_EN |
+					     VD_VERT_RPT_LINE0 |
+					     VD_VERT_INITIAL_PHASE(12) |
+					     VD_VERT_PHASE_STEP(16) | /* /2 */
+					     VD_VERT_FMT_EN;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+		priv->viu.vd1_if0_canvas0 =
+					CANVAS_ADDR2(priv->canvas_id_vd1_1) |
+					CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+					CANVAS_ADDR0(priv->canvas_id_vd1_0);
+		if (fb->format->format == DRM_FORMAT_NV12)
+			priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
+		else
+			priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
+		priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+					     VD_HORZ_FMT_EN |
+					     VD_VERT_RPT_LINE0 |
+					     VD_VERT_INITIAL_PHASE(12) |
+					     VD_VERT_PHASE_STEP(8) | /* /4 */
+					     VD_VERT_FMT_EN;
+		break;
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YUV410:
+		priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+		priv->viu.vd1_if0_canvas0 =
+					CANVAS_ADDR2(priv->canvas_id_vd1_2) |
+					CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+					CANVAS_ADDR0(priv->canvas_id_vd1_0);
+		switch (fb->format->format) {
+		case DRM_FORMAT_YUV422:
+			priv->viu.viu_vd1_fmt_ctrl =
+					VD_HORZ_Y_C_RATIO(1) | /* /2 */
+					VD_HORZ_FMT_EN |
+					VD_VERT_RPT_LINE0 |
+					VD_VERT_INITIAL_PHASE(12) |
+					VD_VERT_PHASE_STEP(16) | /* /2 */
+					VD_VERT_FMT_EN;
+			break;
+		case DRM_FORMAT_YUV420:
+			priv->viu.viu_vd1_fmt_ctrl =
+					VD_HORZ_Y_C_RATIO(1) | /* /2 */
+					VD_HORZ_FMT_EN |
+					VD_VERT_RPT_LINE0 |
+					VD_VERT_INITIAL_PHASE(12) |
+					VD_VERT_PHASE_STEP(8) | /* /4 */
+					VD_VERT_FMT_EN;
+			break;
+		case DRM_FORMAT_YUV411:
+			priv->viu.viu_vd1_fmt_ctrl =
+					VD_HORZ_Y_C_RATIO(2) | /* /4 */
+					VD_HORZ_FMT_EN |
+					VD_VERT_RPT_LINE0 |
+					VD_VERT_INITIAL_PHASE(12) |
+					VD_VERT_PHASE_STEP(16) | /* /2 */
+					VD_VERT_FMT_EN;
+			break;
+		case DRM_FORMAT_YUV410:
+			priv->viu.viu_vd1_fmt_ctrl =
+					VD_HORZ_Y_C_RATIO(2) | /* /4 */
+					VD_HORZ_FMT_EN |
+					VD_VERT_RPT_LINE0 |
+					VD_VERT_INITIAL_PHASE(12) |
+					VD_VERT_PHASE_STEP(8) | /* /4 */
+					VD_VERT_FMT_EN;
+			break;
+		}
+		break;
+	}
+
+	/* Update Canvas with buffer address */
+	priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
+
+	switch (priv->viu.vd1_planes) {
+	case 3:
+		gem = drm_fb_cma_get_gem_obj(fb, 2);
+		priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+		priv->viu.vd1_stride2 = fb->pitches[2];
+		priv->viu.vd1_height2 =
+			drm_format_plane_height(fb->height,
+						fb->format->format, 2);
+		DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
+			 priv->viu.vd1_addr2,
+			 priv->viu.vd1_stride2,
+			 priv->viu.vd1_height2);
+	/* fallthrough */
+	case 2:
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+		priv->viu.vd1_stride1 = fb->pitches[1];
+		priv->viu.vd1_height1 =
+			drm_format_plane_height(fb->height,
+						fb->format->format, 1);
+		DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
+			 priv->viu.vd1_addr1,
+			 priv->viu.vd1_stride1,
+			 priv->viu.vd1_height1);
+	/* fallthrough */
+	case 1:
+		gem = drm_fb_cma_get_gem_obj(fb, 0);
+		priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+		priv->viu.vd1_stride0 = fb->pitches[0];
+		priv->viu.vd1_height0 =
+			drm_format_plane_height(fb->height,
+						fb->format->format, 0);
+		DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
+			 priv->viu.vd1_addr0,
+			 priv->viu.vd1_stride0,
+			 priv->viu.vd1_height0);
+	}
+
+	priv->viu.vd1_enabled = true;
+
+	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+
+	DRM_DEBUG_DRIVER("\n");
+}
+
+static void meson_overlay_atomic_disable(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+	struct meson_drm *priv = meson_overlay->priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	priv->viu.vd1_enabled = false;
+
+	/* Disable VD1 */
+	writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+			    priv->io_base + _REG(VPP_MISC));
+}
+
+static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
+	.atomic_check	= meson_overlay_atomic_check,
+	.atomic_disable	= meson_overlay_atomic_disable,
+	.atomic_update	= meson_overlay_atomic_update,
+	.prepare_fb	= drm_gem_fb_prepare_fb,
+};
+
+static const struct drm_plane_funcs meson_overlay_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= drm_plane_cleanup,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_YUV444,
+	DRM_FORMAT_YUV422,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YUV411,
+	DRM_FORMAT_YUV410,
+};
+
+int meson_overlay_create(struct meson_drm *priv)
+{
+	struct meson_overlay *meson_overlay;
+	struct drm_plane *plane;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
+				   GFP_KERNEL);
+	if (!meson_overlay)
+		return -ENOMEM;
+
+	meson_overlay->priv = priv;
+	plane = &meson_overlay->base;
+
+	drm_universal_plane_init(priv->drm, plane, 0xFF,
+				 &meson_overlay_funcs,
+				 supported_drm_formats,
+				 ARRAY_SIZE(supported_drm_formats),
+				 NULL,
+				 DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
+
+	drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
+
+	priv->overlay_plane = plane;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	return 0;
+}
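
meson_overlay_atomic_check() above bounds scaling with FRAC_16_16(1, 5) and FRAC_16_16(5, 1), i.e. up to 5x upscale and 5x downscale expressed as 16.16 fixed-point src/dst ratios. A small standalone check of those bounds against a plane's scale factor:

#include <stdio.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

/* scale factor of a plane in 16.16: source size over destination size */
static long scale_16_16(long src, long dst)
{
	return (src << 16) / dst;
}

int main(void)
{
	long min = FRAC_16_16(1, 5);		/* 0x03333: 5x upscale   */
	long max = FRAC_16_16(5, 1);		/* 0x50000: 5x downscale */
	long s = scale_16_16(1920, 960);	/* 2x downscale: 0x20000 */

	printf("min 0x%05lx max 0x%05lx scale 0x%05lx ok=%d\n",
	       min, max, s, s >= min && s <= max);
	return 0;
}
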
diff --git a/drivers/gpu/drm/meson/meson_overlay.h b/drivers/gpu/drm/meson/meson_overlay.h
new file mode 100644
index 0000000..dae24f5
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OVERLAY_H
+#define __MESON_OVERLAY_H
+
+#include "meson_drv.h"
+
+int meson_overlay_create(struct meson_drm *priv);
+
+#endif /* __MESON_OVERLAY_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80df..6119a02 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/bitfield.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_rect.h>
 
 #include "meson_plane.h"
@@ -39,12 +41,51 @@
 #include "meson_canvas.h"
 #include "meson_registers.h"
 
+/* OSD_SCI_WH_M1 */
+#define SCI_WH_M1_W(w)			FIELD_PREP(GENMASK(28, 16), w)
+#define SCI_WH_M1_H(h)			FIELD_PREP(GENMASK(12, 0), h)
+
+/* OSD_SCO_H_START_END */
+/* OSD_SCO_V_START_END */
+#define SCO_HV_START(start)		FIELD_PREP(GENMASK(27, 16), start)
+#define SCO_HV_END(end)			FIELD_PREP(GENMASK(11, 0), end)
+
+/* OSD_SC_CTRL0 */
+#define SC_CTRL0_PATH_EN		BIT(3)
+#define SC_CTRL0_SEL_OSD1		BIT(2)
+
+/* OSD_VSC_CTRL0 */
+#define VSC_BANK_LEN(value)		FIELD_PREP(GENMASK(2, 0), value)
+#define VSC_TOP_INI_RCV_NUM(value)	FIELD_PREP(GENMASK(6, 3), value)
+#define VSC_TOP_RPT_L0_NUM(value)	FIELD_PREP(GENMASK(9, 8), value)
+#define VSC_BOT_INI_RCV_NUM(value)	FIELD_PREP(GENMASK(14, 11), value)
+#define VSC_BOT_RPT_L0_NUM(value)	FIELD_PREP(GENMASK(17, 16), value)
+#define VSC_PROG_INTERLACE		BIT(23)
+#define VSC_VERTICAL_SCALER_EN		BIT(24)
+
+/* OSD_VSC_INI_PHASE */
+#define VSC_INI_PHASE_BOT(bottom)	FIELD_PREP(GENMASK(31, 16), bottom)
+#define VSC_INI_PHASE_TOP(top)		FIELD_PREP(GENMASK(15, 0), top)
+
+/* OSD_HSC_CTRL0 */
+#define HSC_BANK_LENGTH(value)		FIELD_PREP(GENMASK(2, 0), value)
+#define HSC_INI_RCV_NUM0(value)		FIELD_PREP(GENMASK(6, 3), value)
+#define HSC_RPT_P0_NUM0(value)		FIELD_PREP(GENMASK(9, 8), value)
+#define HSC_HORIZ_SCALER_EN		BIT(22)
+
+/* VPP_OSD_VSC_PHASE_STEP */
+/* VPP_OSD_HSC_PHASE_STEP */
+#define SC_PHASE_STEP(value)		FIELD_PREP(GENMASK(27, 0), value)
+
 struct meson_plane {
 	struct drm_plane base;
 	struct meson_drm *priv;
+	bool enabled;
 };
 #define to_meson_plane(x) container_of(x, struct meson_plane, base)
 
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+
 static int meson_plane_atomic_check(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
@@ -57,10 +98,15 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
 	if (IS_ERR(crtc_state))
 		return PTR_ERR(crtc_state);
 
+	/*
+	 * Only allow:
+	 * - Upscaling up to 5x, vertical and horizontal
+	 * - Final coordinates must match crtc size
+	 */
 	return drm_atomic_helper_check_plane_state(state, crtc_state,
+						   FRAC_16_16(1, 5),
 						   DRM_PLANE_HELPER_NO_SCALING,
-						   DRM_PLANE_HELPER_NO_SCALING,
-						   true, true);
+						   false, true);
 }
 
 /* Takes a fixed 16.16 number and converts it to integer. */
@@ -74,22 +120,20 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 {
 	struct meson_plane *meson_plane = to_meson_plane(plane);
 	struct drm_plane_state *state = plane->state;
-	struct drm_framebuffer *fb = state->fb;
+	struct drm_rect dest = drm_plane_state_dest(state);
 	struct meson_drm *priv = meson_plane->priv;
+	struct drm_framebuffer *fb = state->fb;
 	struct drm_gem_cma_object *gem;
-	struct drm_rect src = {
-		.x1 = (state->src_x),
-		.y1 = (state->src_y),
-		.x2 = (state->src_x + state->src_w),
-		.y2 = (state->src_y + state->src_h),
-	};
-	struct drm_rect dest = {
-		.x1 = state->crtc_x,
-		.y1 = state->crtc_y,
-		.x2 = state->crtc_x + state->crtc_w,
-		.y2 = state->crtc_y + state->crtc_h,
-	};
 	unsigned long flags;
+	int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
+	int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
+	int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
+	int hf_phase_step, vf_phase_step;
+	int src_w, src_h, dst_w, dst_h;
+	int bot_ini_phase;
+	int hf_bank_len;
+	int vf_bank_len;
+	u8 canvas_id_osd1;
 
 	/*
 	 * Update Coordinates
@@ -104,8 +148,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 				   (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
 				   OSD_BLK0_ENABLE;
 
+	if (priv->canvas)
+		canvas_id_osd1 = priv->canvas_id_osd1;
+	else
+		canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+
 	/* Set up BLK0 to point to the right canvas */
-	priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+	priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
 				      OSD_ENDIANNESS_LE);
 
 	/* On GXBB, Use the old non-HDR RGB2YUV converter */
@@ -137,23 +186,115 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 		break;
 	};
 
-	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
-		priv->viu.osd1_interlace = true;
+	/* Default scaler parameters */
+	vsc_bot_rcv_num = 0;
+	vsc_bot_rpt_p0_num = 0;
+	hf_bank_len = 4;
+	vf_bank_len = 4;
 
+	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		vsc_bot_rcv_num = 6;
+		vsc_bot_rpt_p0_num = 2;
+	}
+
+	hsc_ini_rcv_num = hf_bank_len;
+	vsc_ini_rcv_num = vf_bank_len;
+	hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
+	vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
+
+	src_w = fixed16_to_int(state->src_w);
+	src_h = fixed16_to_int(state->src_h);
+	dst_w = state->crtc_w;
+	dst_h = state->crtc_h;
+
+	/*
+	 * When the output is interlaced, the OSD must switch between
+	 * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
+	 * at each vsync.
+	 * But the vertical scaler can provide such functionality if
+	 * it is configured for 2:1 scaling with interlace options enabled.
+	 */
+	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		dest.y1 /= 2;
 		dest.y2 /= 2;
-	} else
-		priv->viu.osd1_interlace = false;
+		dst_h /= 2;
+	}
+
+	hf_phase_step = ((src_w << 18) / dst_w) << 6;
+	vf_phase_step = (src_h << 20) / dst_h;
+
+	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+		bot_ini_phase = ((vf_phase_step / 2) >> 4);
+	else
+		bot_ini_phase = 0;
+
+	vf_phase_step = (vf_phase_step << 4);
+
+	/* In interlaced mode, scaler is always active */
+	if (src_h != dst_h || src_w != dst_w) {
+		priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
+					   SCI_WH_M1_H(src_h - 1);
+		priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
+						 SCO_HV_END(dest.x2 - 1);
+		priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
+						 SCO_HV_END(dest.y2 - 1);
+		/* Enable OSD Scaler */
+		priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
+	} else {
+		priv->viu.osd_sc_i_wh_m1 = 0;
+		priv->viu.osd_sc_o_h_start_end = 0;
+		priv->viu.osd_sc_o_v_start_end = 0;
+		priv->viu.osd_sc_ctrl0 = 0;
+	}
+
+	/* In interlaced mode, vertical scaler is always active */
+	if (src_h != dst_h) {
+		priv->viu.osd_sc_v_ctrl0 =
+					VSC_BANK_LEN(vf_bank_len) |
+					VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
+					VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
+					VSC_VERTICAL_SCALER_EN;
+
+		if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+			priv->viu.osd_sc_v_ctrl0 |=
+					VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
+					VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
+					VSC_PROG_INTERLACE;
+
+		priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
+		priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
+	} else {
+		priv->viu.osd_sc_v_ctrl0 = 0;
+		priv->viu.osd_sc_v_phase_step = 0;
+		priv->viu.osd_sc_v_ini_phase = 0;
+	}
+
+	/* Horizontal scaler is only used if width does not match */
+	if (src_w != dst_w) {
+		priv->viu.osd_sc_h_ctrl0 =
+					HSC_BANK_LENGTH(hf_bank_len) |
+					HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
+					HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
+					HSC_HORIZ_SCALER_EN;
+		priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
+		priv->viu.osd_sc_h_ini_phase = 0;
+	} else {
+		priv->viu.osd_sc_h_ctrl0 = 0;
+		priv->viu.osd_sc_h_phase_step = 0;
+		priv->viu.osd_sc_h_ini_phase = 0;
+	}
 
 	/*
 	 * The format of these registers is (x2 << 16 | x1),
 	 * where x2 is exclusive.
 	 * e.g. +30x1920 would be (1919 << 16) | 30
 	 */
-	priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
-					fixed16_to_int(src.x1);
-	priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
-					fixed16_to_int(src.y1);
+	priv->viu.osd1_blk0_cfg[1] =
+				((fixed16_to_int(state->src.x2) - 1) << 16) |
+				fixed16_to_int(state->src.x1);
+	priv->viu.osd1_blk0_cfg[2] =
+				((fixed16_to_int(state->src.y2) - 1) << 16) |
+				fixed16_to_int(state->src.y1);
 	priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
 	priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
 
@@ -164,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 	priv->viu.osd1_stride = fb->pitches[0];
 	priv->viu.osd1_height = fb->height;
 
+	if (!meson_plane->enabled) {
+		/* Reset OSD1 before enabling it on GXL+ SoCs */
+		if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+		    meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+			meson_viu_osd1_reset(priv);
+
+		meson_plane->enabled = true;
+	}
+
 	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
 }
 
@@ -177,12 +327,15 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 	writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
 			    priv->io_base + _REG(VPP_MISC));
 
+	meson_plane->enabled = false;
 }
 
 static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
 	.atomic_check	= meson_plane_atomic_check,
 	.atomic_disable	= meson_plane_atomic_disable,
 	.atomic_update	= meson_plane_atomic_update,
+	.prepare_fb	= drm_gem_fb_prepare_fb,
 };
 
 static const struct drm_plane_funcs meson_plane_funcs = {
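
The OSD scaler setup above computes hf_phase_step and vf_phase_step as source/destination ratios that both end up in .24 fixed point, reached through different intermediate precisions (.18 shifted left by 6 horizontally, .20 shifted left by 4 vertically). A standalone check of that arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t h_phase_step(uint32_t src_w, uint32_t dst_w)
{
	return (((uint64_t)src_w << 18) / dst_w) << 6;	/* .18 -> .24 */
}

static uint32_t v_phase_step(uint32_t src_h, uint32_t dst_h)
{
	return (((uint64_t)src_h << 20) / dst_h) << 4;	/* .20 -> .24 */
}

int main(void)
{
	/* 1280x720 framebuffer scaled to 1920x1080: ratio 2/3 */
	printf("h step 0x%08" PRIx32 "\n", h_phase_step(1280, 1920));
	printf("v step 0x%08" PRIx32 "\n", v_phase_step(720, 1080));
	return 0;
}
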
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index bca8714..5c7e02c 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -286,6 +286,7 @@
 #define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
 #define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
 #define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VD1_IF0_GEN_REG3 0x1aa7
 #define VIU_OSD1_EOTF_CTL 0x1ad4
 #define VIU_OSD1_EOTF_COEF00_01 0x1ad5
 #define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -297,6 +298,7 @@
 #define VIU_OSD1_OETF_CTL 0x1adc
 #define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
 #define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+#define AFBC_ENABLE 0x1ae0
 
 /* vpp */
 #define VPP_DUMMY_DATA 0x1d00
@@ -349,6 +351,7 @@
 #define		VPP_VD2_PREBLEND	BIT(15)
 #define		VPP_OSD1_PREBLEND	BIT(16)
 #define		VPP_OSD2_PREBLEND	BIT(17)
+#define		VPP_COLOR_MNG_ENABLE	BIT(28)
 #define VPP_OFIFO_SIZE 0x1d27
 #define VPP_FIFO_STATUS 0x1d28
 #define VPP_SMOKE_CTRL 0x1d29
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ae54732..f6ba35a 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -117,6 +117,8 @@
 #define HDMI_PLL_RESET		BIT(28)
 #define HDMI_PLL_LOCK		BIT(31)
 
+#define FREQ_1000_1001(_freq)	DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+
 /* VID PLL Dividers */
 enum {
 	VID_PLL_DIV_1 = 0,
@@ -323,7 +325,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
 enum {
 /* PLL	O1 O2 O3 VP DV     EN TX */
 /* 4320 /4 /4 /1 /5 /1  => /2 /2 */
-	MESON_VCLK_HDMI_ENCI_54000 = 1,
+	MESON_VCLK_HDMI_ENCI_54000 = 0,
 /* 4320 /4 /4 /1 /5 /1  => /1 /2 */
 	MESON_VCLK_HDMI_DDR_54000,
 /* 2970 /4 /1 /1 /5 /1  => /1 /2 */
@@ -339,6 +341,7 @@ enum {
 };
 
 struct meson_vclk_params {
+	unsigned int pixel_freq;
 	unsigned int pll_base_freq;
 	unsigned int pll_od1;
 	unsigned int pll_od2;
@@ -347,6 +350,7 @@ struct meson_vclk_params {
 	unsigned int vclk_div;
 } params[] = {
 	[MESON_VCLK_HDMI_ENCI_54000] = {
+		.pixel_freq = 54000,
 		.pll_base_freq = 4320000,
 		.pll_od1 = 4,
 		.pll_od2 = 4,
@@ -355,6 +359,7 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_DDR_54000] = {
+		.pixel_freq = 54000,
 		.pll_base_freq = 4320000,
 		.pll_od1 = 4,
 		.pll_od2 = 4,
@@ -363,6 +368,7 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_DDR_148500] = {
+		.pixel_freq = 148500,
 		.pll_base_freq = 2970000,
 		.pll_od1 = 4,
 		.pll_od2 = 1,
@@ -371,6 +377,7 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_74250] = {
+		.pixel_freq = 74250,
 		.pll_base_freq = 2970000,
 		.pll_od1 = 2,
 		.pll_od2 = 2,
@@ -379,6 +386,7 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_148500] = {
+		.pixel_freq = 148500,
 		.pll_base_freq = 2970000,
 		.pll_od1 = 1,
 		.pll_od2 = 2,
@@ -387,6 +395,7 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_297000] = {
+		.pixel_freq = 297000,
 		.pll_base_freq = 2970000,
 		.pll_od1 = 1,
 		.pll_od2 = 1,
@@ -395,6 +404,7 @@ struct meson_vclk_params {
 		.vclk_div = 2,
 	},
 	[MESON_VCLK_HDMI_594000] = {
+		.pixel_freq = 594000,
 		.pll_base_freq = 5940000,
 		.pll_od1 = 1,
 		.pll_od2 = 1,
@@ -402,6 +412,7 @@ struct meson_vclk_params {
 		.vid_pll_div = VID_PLL_DIV_5,
 		.vclk_div = 1,
 	},
+	{ /* sentinel */ },
 };
 
 static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -626,12 +637,37 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
 		  pll_freq);
 }
 
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq)
+{
+	int i;
+
+	DRM_DEBUG_DRIVER("freq = %d\n", freq);
+
+	for (i = 0 ; params[i].pixel_freq ; ++i) {
+		DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+				 i, params[i].pixel_freq,
+				 FREQ_1000_1001(params[i].pixel_freq));
+		/* Match the exact frequency */
+		if (freq == params[i].pixel_freq)
+			return MODE_OK;
+		/* Match 1000/1001 variant */
+		if (freq == FREQ_1000_1001(params[i].pixel_freq))
+			return MODE_OK;
+	}
+
+	return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
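meson_vclk_vic_supported_freq() is exported for use elsewhere in the driver;
a hedged usage sketch (the caller below is hypothetical, presumably the HDMI
connector's mode_valid path, with mode->clock in kHz like pixel_freq):

	static enum drm_mode_status
	hdmi_mode_valid_sketch(const struct drm_display_mode *mode)
	{
		/* Accept only pixel clocks the VCLK tree can produce,
		 * either exactly or as the 1000/1001 variant. */
		return meson_vclk_vic_supported_freq(mode->clock);
	}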
+
 static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
 			   unsigned int od1, unsigned int od2, unsigned int od3,
 			   unsigned int vid_pll_div, unsigned int vclk_div,
 			   unsigned int hdmi_tx_div, unsigned int venc_div,
-			   bool hdmi_use_enci)
+			   bool hdmi_use_enci, bool vic_alternate_clock)
 {
+	unsigned int m = 0, frac = 0;
+
 	/* Set HDMI-TX sys clock */
 	regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
 			   CTS_HDMI_SYS_SEL_MASK, 0);
@@ -646,34 +682,38 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
 	} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
 		switch (pll_base_freq) {
 		case 2970000:
-			meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
-						  od1, od2, od3);
+			m = 0x3d;
+			frac = vic_alternate_clock ? 0xd02 : 0xe00;
 			break;
 		case 4320000:
-			meson_hdmi_pll_set_params(priv, 0x5a, 0,
-						  od1, od2, od3);
+			m = vic_alternate_clock ? 0x59 : 0x5a;
+			frac = vic_alternate_clock ? 0xe8f : 0;
 			break;
 		case 5940000:
-			meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
-						  od1, od2, od3);
+			m = 0x7b;
+			frac = vic_alternate_clock ? 0xa05 : 0xc00;
 			break;
 		}
+
+		meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
 	} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
 		   meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
 		switch (pll_base_freq) {
 		case 2970000:
-			meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
-						  od1, od2, od3);
+			m = 0x7b;
+			frac = vic_alternate_clock ? 0x281 : 0x300;
 			break;
 		case 4320000:
-			meson_hdmi_pll_set_params(priv, 0xb4, 0,
-						  od1, od2, od3);
+			m = vic_alternate_clock ? 0xb3 : 0xb4;
+			frac = vic_alternate_clock ? 0x347 : 0;
 			break;
 		case 5940000:
-			meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
-						  od1, od2, od3);
+			m = 0xf7;
+			frac = vic_alternate_clock ? 0x102 : 0x200;
 			break;
 		}
+
+		meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
 	}
 
 	/* Setup vid_pll divider */
@@ -826,6 +866,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		      unsigned int vclk_freq, unsigned int venc_freq,
 		      unsigned int dac_freq, bool hdmi_use_enci)
 {
+	bool vic_alternate_clock = false;
 	unsigned int freq;
 	unsigned int hdmi_tx_div;
 	unsigned int venc_div;
@@ -843,7 +884,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		 * - encp encoder
 		 */
 		meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
-			       VID_PLL_DIV_5, 2, 1, 1, false);
+			       VID_PLL_DIV_5, 2, 1, 1, false, false);
 		return;
 	}
 
@@ -863,31 +904,35 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		return;
 	}
 
-	switch (vclk_freq) {
-	case 54000:
-		if (hdmi_use_enci)
-			freq = MESON_VCLK_HDMI_ENCI_54000;
-		else
-			freq = MESON_VCLK_HDMI_DDR_54000;
-		break;
-	case 74250:
-		freq = MESON_VCLK_HDMI_74250;
-		break;
-	case 148500:
-		if (dac_freq != 148500)
-			freq = MESON_VCLK_HDMI_DDR_148500;
-		else
-			freq = MESON_VCLK_HDMI_148500;
-		break;
-	case 297000:
-		freq = MESON_VCLK_HDMI_297000;
-		break;
-	case 594000:
-		freq = MESON_VCLK_HDMI_594000;
-		break;
-	default:
-		pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
-		       vclk_freq);
+	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+		if (vclk_freq == params[freq].pixel_freq ||
+		    vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
+			if (vclk_freq != params[freq].pixel_freq)
+				vic_alternate_clock = true;
+			else
+				vic_alternate_clock = false;
+
+			if (freq == MESON_VCLK_HDMI_ENCI_54000 &&
+			    !hdmi_use_enci)
+				continue;
+
+			if (freq == MESON_VCLK_HDMI_DDR_54000 &&
+			    hdmi_use_enci)
+				continue;
+
+			if (freq == MESON_VCLK_HDMI_DDR_148500 &&
+			    dac_freq == vclk_freq)
+				continue;
+
+			if (freq == MESON_VCLK_HDMI_148500 &&
+			    dac_freq != vclk_freq)
+				continue;
+			break;
+		}
+	}
+
+	if (!params[freq].pixel_freq) {
+		pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
 		return;
 	}
 
@@ -895,6 +940,6 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		       params[freq].pll_od1, params[freq].pll_od2,
 		       params[freq].pll_od3, params[freq].vid_pll_div,
 		       params[freq].vclk_div, hdmi_tx_div, venc_div,
-		       hdmi_use_enci);
+		       hdmi_use_enci, vic_alternate_clock);
 }
 EXPORT_SYMBOL_GPL(meson_vclk_setup);
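The vic_alternate_clock handling above exists for the 1000/1001 "NTSC rate"
variants of the CEA pixel clocks. A standalone sketch (plain userspace C,
not kernel code; DIV_ROUND_CLOSEST re-derived here for positive operands)
of what FREQ_1000_1001() evaluates to for the table's frequencies, in kHz:

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
	#define FREQ_1000_1001(_freq)	DIV_ROUND_CLOSEST((_freq) * 1000UL, 1001UL)

	int main(void)
	{
		unsigned long freqs[] = { 54000, 74250, 148500, 297000, 594000 };
		int i;

		for (i = 0; i < 5; i++)
			printf("%lu -> %lu\n", freqs[i], FREQ_1000_1001(freqs[i]));
		/* prints: 54000 -> 53946, 74250 -> 74176, 148500 -> 148352,
		 *         297000 -> 296703, 594000 -> 593407 */
		return 0;
	}

So a 59.94 Hz mode requesting 148352 kHz matches the 148500 kHz table entry
with vic_alternate_clock set, which in turn selects the fractional PLL
settings (the "frac" values) instead of the exact ones.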
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 869fa3a..4bd8752 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -32,6 +32,8 @@ enum {
 
 enum drm_mode_status
 meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq);
 
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		      unsigned int vclk_freq, unsigned int venc_freq,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index be76f3d..0ba04f6 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -698,6 +698,132 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
 	},
 };
 
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p24 = {
+	.encp = {
+		.dvi_settings = 0x1,
+		.video_mode = 0x4040,
+		.video_mode_adv = 0x8,
+		/* video_sync_mode */
+		/* video_yc_dly */
+		/* video_rgb_ctrl */
+		.video_filt_ctrl = 0x1000,
+		.video_filt_ctrl_present = true,
+		/* video_ofld_voav_ofst */
+		.yfp1_htime = 140,
+		.yfp2_htime = 140+3840,
+		.max_pxcnt = 3840+1660-1,
+		.hspuls_begin = 2156+1920,
+		.hspuls_end = 44,
+		.hspuls_switch = 44,
+		.vspuls_begin = 140,
+		.vspuls_end = 2059+1920,
+		.vspuls_bline = 0,
+		.vspuls_eline = 4,
+		.havon_begin = 148,
+		.havon_end = 3987,
+		.vavon_bline = 89,
+		.vavon_eline = 2248,
+		/* eqpuls_begin */
+		/* eqpuls_end */
+		/* eqpuls_bline */
+		/* eqpuls_eline */
+		.hso_begin = 44,
+		.hso_end = 2156+1920,
+		.vso_begin = 2100+1920,
+		.vso_end = 2164+1920,
+		.vso_bline = 51,
+		.vso_eline = 53,
+		.vso_eline_present = true,
+		/* sy_val */
+		/* sy2_val */
+		.max_lncnt = 2249,
+	},
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p25 = {
+	.encp = {
+		.dvi_settings = 0x1,
+		.video_mode = 0x4040,
+		.video_mode_adv = 0x8,
+		/* video_sync_mode */
+		/* video_yc_dly */
+		/* video_rgb_ctrl */
+		.video_filt_ctrl = 0x1000,
+		.video_filt_ctrl_present = true,
+		/* video_ofld_voav_ofst */
+		.yfp1_htime = 140,
+		.yfp2_htime = 140+3840,
+		.max_pxcnt = 3840+1440-1,
+		.hspuls_begin = 2156+1920,
+		.hspuls_end = 44,
+		.hspuls_switch = 44,
+		.vspuls_begin = 140,
+		.vspuls_end = 2059+1920,
+		.vspuls_bline = 0,
+		.vspuls_eline = 4,
+		.havon_begin = 148,
+		.havon_end = 3987,
+		.vavon_bline = 89,
+		.vavon_eline = 2248,
+		/* eqpuls_begin */
+		/* eqpuls_end */
+		/* eqpuls_bline */
+		/* eqpuls_eline */
+		.hso_begin = 44,
+		.hso_end = 2156+1920,
+		.vso_begin = 2100+1920,
+		.vso_end = 2164+1920,
+		.vso_bline = 51,
+		.vso_eline = 53,
+		.vso_eline_present = true,
+		/* sy_val */
+		/* sy2_val */
+		.max_lncnt = 2249,
+	},
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p30 = {
+	.encp = {
+		.dvi_settings = 0x1,
+		.video_mode = 0x4040,
+		.video_mode_adv = 0x8,
+		/* video_sync_mode */
+		/* video_yc_dly */
+		/* video_rgb_ctrl */
+		.video_filt_ctrl = 0x1000,
+		.video_filt_ctrl_present = true,
+		/* video_ofld_voav_ofst */
+		.yfp1_htime = 140,
+		.yfp2_htime = 140+3840,
+		.max_pxcnt = 3840+560-1,
+		.hspuls_begin = 2156+1920,
+		.hspuls_end = 44,
+		.hspuls_switch = 44,
+		.vspuls_begin = 140,
+		.vspuls_end = 2059+1920,
+		.vspuls_bline = 0,
+		.vspuls_eline = 4,
+		.havon_begin = 148,
+		.havon_end = 3987,
+		.vavon_bline = 89,
+		.vavon_eline = 2248,
+		/* eqpuls_begin */
+		/* eqpuls_end */
+		/* eqpuls_bline */
+		/* eqpuls_eline */
+		.hso_begin = 44,
+		.hso_end = 2156+1920,
+		.vso_begin = 2100+1920,
+		.vso_end = 2164+1920,
+		.vso_bline = 51,
+		.vso_eline = 53,
+		.vso_eline_present = true,
+		/* sy_val */
+		/* sy2_val */
+		.max_lncnt = 2249,
+	},
+};
+
 struct meson_hdmi_venc_vic_mode {
 	unsigned int vic;
 	union meson_hdmi_venc_mode *mode;
@@ -719,6 +845,9 @@ struct meson_hdmi_venc_vic_mode {
 	{ 34, &meson_hdmi_encp_mode_1080p30 },
 	{ 31, &meson_hdmi_encp_mode_1080p50 },
 	{ 16, &meson_hdmi_encp_mode_1080p60 },
+	{ 93, &meson_hdmi_encp_mode_2160p24 },
+	{ 94, &meson_hdmi_encp_mode_2160p25 },
+	{ 95, &meson_hdmi_encp_mode_2160p30 },
 	{ 0, NULL}, /* sentinel */
 };
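A cross-check of the three new ENCP timings against the vclk side (worked
arithmetic, assuming htotal = max_pxcnt + 1 and vtotal = max_lncnt + 1):

	/* All three modes resolve to the same 297 MHz VIC pixel clock:
	 *   2160p24: (3840 + 1660) * 2250 * 24 = 5500 * 2250 * 24 = 297000000
	 *   2160p25: (3840 + 1440) * 2250 * 25 = 5280 * 2250 * 25 = 297000000
	 *   2160p30: (3840 +  560) * 2250 * 30 = 4400 * 2250 * 30 = 297000000
	 * consistent with the existing MESON_VCLK_HDMI_297000 params entry
	 * covering VICs 93, 94 and 95. */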
 
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 26a0857..e46e05f 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv)
 				 true);
 }
 
+/* VIU OSD1 Reset as a workaround for the GXL+ Alpha OSD Bug */
+void meson_viu_osd1_reset(struct meson_drm *priv)
+{
+	uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
+
+	/* Save the state of these two registers */
+	osd1_fifo_ctrl_stat = readl_relaxed(
+				priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+	osd1_ctrl_stat2 = readl_relaxed(
+				priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+	/* Reset OSD1 */
+	writel_bits_relaxed(BIT(0), BIT(0),
+			    priv->io_base + _REG(VIU_SW_RESET));
+	writel_bits_relaxed(BIT(0), 0,
+			    priv->io_base + _REG(VIU_SW_RESET));
+
+	/* Restore the register state lost in the reset */
+	writel_relaxed(osd1_fifo_ctrl_stat,
+		       priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+	writel_relaxed(osd1_ctrl_stat2,
+		       priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+	/* Reload the conversion matrix */
+	meson_viu_load_matrix(priv);
+}
+
 void meson_viu_init(struct meson_drm *priv)
 {
 	uint32_t reg;
@@ -329,6 +356,21 @@ void meson_viu_init(struct meson_drm *priv)
 			    0xff << OSD_REPLACE_SHIFT,
 			    priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
 
+	/* Disable VD1 AFBC */
+	/* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
+	writel_bits_relaxed(0x7 << 16, 0,
+			priv->io_base + _REG(VIU_MISC_CTRL0));
+	/* afbc vd1 set=0 */
+	writel_bits_relaxed(BIT(20), 0,
+			priv->io_base + _REG(VIU_MISC_CTRL0));
+	writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
+
+	writel_relaxed(0x00FF00C0,
+			priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
+	writel_relaxed(0x00FF00C0,
+			priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+
 	priv->viu.osd1_enabled = false;
 	priv->viu.osd1_commit = false;
 	priv->viu.osd1_interlace = false;
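The reset sequence in meson_viu_osd1_reset() above pulses bit 0 of
VIU_SW_RESET through the driver's writel_bits_relaxed() helper. A sketch of
that helper's assumed read-modify-write semantics (inferred from its uses
here; the definition is not part of this hunk):

	/* Assumed behaviour: update only the masked bits at addr. */
	static inline void writel_bits_relaxed_sketch(u32 mask, u32 val,
						      void __iomem *addr)
	{
		writel_relaxed((readl_relaxed(addr) & ~mask) | (val & mask),
			       addr);
	}

Under that reading, writing BIT(0) then 0 with mask BIT(0) is a set/clear
reset pulse that leaves the other VIU_SW_RESET bits untouched, and the
save/restore around it covers the two registers the reset clobbers.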
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index 073b191..0f84bdd 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -59,6 +59,7 @@
 #define OSD_REPLACE_EN		BIT(14)
 #define OSD_REPLACE_SHIFT	6
 
+void meson_viu_osd1_reset(struct meson_drm *priv);
 void meson_viu_init(struct meson_drm *priv);
 
 #endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 27356f8..f9efb43 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -51,52 +51,6 @@ void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
 	writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
 }
 
-/*
- * When the output is interlaced, the OSD must switch between
- * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
- * at each vsync.
- * But the vertical scaler can provide such funtionnality if
- * is configured for 2:1 scaling with interlace options enabled.
- */
-void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
-					    struct drm_rect *input)
-{
-	writel_relaxed(BIT(3) /* Enable scaler */ |
-		       BIT(2), /* Select OSD1 */
-			priv->io_base + _REG(VPP_OSD_SC_CTRL0));
-
-	writel_relaxed(((drm_rect_width(input) - 1) << 16) |
-		       (drm_rect_height(input) - 1),
-			priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
-	/* 2:1 scaling */
-	writel_relaxed(((input->x1) << 16) | (input->x2),
-			priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
-	writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
-			priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
-
-	/* 2:1 scaling values */
-	writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
-	writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
-
-	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-
-	writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
-		       (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
-		       (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
-		       (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
-		       (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
-		       BIT(23)	/* osd_prog_interlace */ |
-		       BIT(24), /* Enable vertical scaler */
-			priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
-}
-
-void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
-{
-	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
-	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
-	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-}
-
 static unsigned int vpp_filter_coefs_4point_bspline[] = {
 	0x15561500, 0x14561600, 0x13561700, 0x12561800,
 	0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
@@ -122,6 +76,31 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
 				priv->io_base + _REG(VPP_OSD_SCALE_COEF));
 }
 
+static const uint32_t vpp_filter_coefs_bicubic[] = {
+	0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
+	0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
+	0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
+	0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
+	0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
+	0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
+	0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
+	0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
+	0xf84848f8
+};
+
+static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
+						    const unsigned int *coefs,
+						    bool is_horizontal)
+{
+	int i;
+
+	writel_relaxed(is_horizontal ? BIT(8) : 0,
+			priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+	for (i = 0; i < 33; i++)
+		writel_relaxed(coefs[i],
+				priv->io_base + _REG(VPP_SCALE_COEF));
+}
+
 void meson_vpp_init(struct meson_drm *priv)
 {
 	/* set dummy data default YUV black */
@@ -150,17 +129,34 @@ void meson_vpp_init(struct meson_drm *priv)
 
 	/* Force all planes off */
 	writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
-			    VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+			    VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+			    VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
 			    priv->io_base + _REG(VPP_MISC));
 
+	/* Setup default VD settings */
+	writel_relaxed(4096,
+			priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+	writel_relaxed(4096,
+			priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+
 	/* Disable Scalers */
 	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
 	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
 	writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+	writel_relaxed(4 | (4 << 8) | BIT(15),
+		       priv->io_base + _REG(VPP_SC_MISC));
+
+	writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
 
 	/* Write in the proper filter coefficients. */
 	meson_vpp_write_scaling_filter_coefs(priv,
 				vpp_filter_coefs_4point_bspline, false);
 	meson_vpp_write_scaling_filter_coefs(priv,
 				vpp_filter_coefs_4point_bspline, true);
+
+	/* Write the VD proper filter coefficients. */
+	meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+						false);
+	meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+						true);
 }
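The new bicubic table stores one scaler phase per 32-bit word, and each word
appears to pack four signed 8-bit taps that sum to 128, i.e. a normalized
4-tap kernel. A decoding sketch under that assumption (the tap ordering
within the word is also an assumption):

	/* e.g. 0xfd7e0500 -> { -3, 126, 5, 0 }, sum = 128 */
	static void decode_vd_taps(u32 word, s8 taps[4])
	{
		taps[0] = (s8)(word >> 24);
		taps[1] = (s8)(word >> 16);
		taps[2] = (s8)(word >> 8);
		taps[3] = (s8)word;
	}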
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 04f1dfb..0aaedc5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -212,8 +212,6 @@ struct mga_device {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05570f0..d96a9b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct mga_device, ttm.bdev);
 }
 
-static int
-mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int mgag200_ttm_global_init(struct mga_device *ast)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	global_ref = &ast->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &mgag200_ttm_mem_global_init;
-	global_ref->release = &mgag200_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	ast->ttm.bo_global_ref.mem_glob =
-		ast->ttm.mem_global_ref.object;
-	global_ref = &ast->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&ast->ttm.mem_global_ref);
-		return r;
-	}
-	return 0;
-}
-
-static void
-mgag200_ttm_global_release(struct mga_device *ast)
-{
-	if (ast->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&ast->ttm.mem_global_ref);
-	ast->ttm.mem_global_ref.release = NULL;
-}
-
-
 static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct mgag200_bo *bo;
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
 	struct drm_device *dev = mdev->dev;
 	struct ttm_bo_device *bdev = &mdev->ttm.bdev;
 
-	ret = mgag200_ttm_global_init(mdev);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&mdev->ttm.bdev,
-				 mdev->ttm.bo_global_ref.ref.object,
 				 &mgag200_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
 
 	ttm_bo_device_release(&mdev->ttm.bdev);
 
-	mgag200_ttm_global_release(mdev);
-
 	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
 				pci_resource_len(dev->pdev, 0));
 	arch_phys_wc_del(mdev->fb_mtrr);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 843a9d4..cf549f1 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,7 @@
 config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
-	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+	depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
 	select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@
 	select DRM_PANEL
 	select SHMEM
 	select TMPFS
-	select QCOM_SCM
+	select QCOM_SCM if ARCH_QCOM
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 19ab521..56a70c7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,7 @@
 msm-y := \
 	adreno/adreno_device.o \
 	adreno/adreno_gpu.o \
+	adreno/a2xx_gpu.o \
 	adreno/a3xx_gpu.o \
 	adreno/a4xx_gpu.o \
 	adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@
 	adreno/a6xx_gpu.o \
 	adreno/a6xx_gmu.o \
 	adreno/a6xx_hfi.o \
+	adreno/a6xx_gpu_state.o \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@
 	disp/dpu1/dpu_hw_util.o \
 	disp/dpu1/dpu_hw_vbif.o \
 	disp/dpu1/dpu_io_util.o \
-	disp/dpu1/dpu_irq.o \
 	disp/dpu1/dpu_kms.o \
 	disp/dpu1/dpu_mdss.o \
 	disp/dpu1/dpu_plane.o \
-	disp/dpu1/dpu_power_handle.o \
 	disp/dpu1/dpu_rm.o \
 	disp/dpu1/dpu_vbif.o \
 	msm_atomic.o \
@@ -90,10 +90,11 @@
 	msm_perf.o \
 	msm_rd.o \
 	msm_ringbuffer.o \
-	msm_submitqueue.o
+	msm_submitqueue.o \
+	msm_gpu_tracepoints.o \
+	msm_gpummu.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
-			  disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 12b0ba2..14eb52f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
 enum sq_tex_filter {
 	SQ_TEX_FILTER_POINT = 0,
 	SQ_TEX_FILTER_BILINEAR = 1,
-	SQ_TEX_FILTER_BICUBIC = 2,
+	SQ_TEX_FILTER_BASEMAP = 2,
+	SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+	SQ_TEX_ANISO_FILTER_DISABLED = 0,
+	SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+	SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+	SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+	SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+	SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+	SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+	SQ_TEX_DIMENSION_1D = 0,
+	SQ_TEX_DIMENSION_2D = 1,
+	SQ_TEX_DIMENSION_3D = 2,
+	SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+	SQ_TEX_BORDER_COLOR_BLACK = 0,
+	SQ_TEX_BORDER_COLOR_WHITE = 1,
+	SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+	SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+	SQ_TEX_SIGN_UNISIGNED = 0,
+	SQ_TEX_SIGN_SIGNED = 1,
+	SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+	SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+	SQ_TEX_ENDIAN_NONE = 0,
+	SQ_TEX_ENDIAN_8IN16 = 1,
+	SQ_TEX_ENDIAN_8IN32 = 2,
+	SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+	SQ_TEX_CLAMP_POLICY_D3D = 0,
+	SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+	SQ_TEX_NUM_FORMAT_FRAC = 0,
+	SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+	SQ_TEX_TYPE_0 = 0,
+	SQ_TEX_TYPE_1 = 1,
+	SQ_TEX_TYPE_2 = 2,
+	SQ_TEX_TYPE_3 = 3,
 };
 
 #define REG_A2XX_RBBM_PATCH_RELEASE				0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 }
 
 #define REG_A2XX_MH_MMU_VA_RANGE				0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK		0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT		0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+	return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK			0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT			12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+	return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
 
 #define REG_A2XX_MH_MMU_PT_BASE					0x00000042
 
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 #define REG_A2XX_MH_MMU_TRAN_ERROR				0x00000044
 
 #define REG_A2XX_MH_MMU_INVALIDATE				0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL			0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC			0x00000002
 
 #define REG_A2XX_MH_MMU_MPU_BASE				0x00000046
 
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 #define REG_A2XX_RBBM_READ_ERROR				0x000003b3
 
 #define REG_A2XX_RBBM_INT_CNTL					0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK			0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK		0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK			0x00080000
 
 #define REG_A2XX_RBBM_INT_STATUS				0x000003b5
 
 #define REG_A2XX_RBBM_INT_ACK					0x000003b6
 
 #define REG_A2XX_MASTER_INT_SIGNAL				0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT			0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT			0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT			0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT			0x80000000
 
 #define REG_A2XX_RBBM_PERIPHID1					0x000003f9
 
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
 #define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE			0x02000000
 #define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE			0x04000000
 
+#define REG_A2XX_MH_INTERRUPT_MASK				0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR			0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR			0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT			0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS				0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR				0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1			0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2			0x00000a55
+
 #define REG_A2XX_A220_VSC_BIN_SIZE				0x00000c01
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK			0x0000001f
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT			0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
 #define REG_A2XX_RB_DEBUG_DATA					0x00000f27
 
 #define REG_A2XX_RB_SURFACE_INFO				0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK		0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT		0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+	return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK			0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT		14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+	return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
 
 #define REG_A2XX_RB_COLOR_INFO					0x00002001
 #define A2XX_RB_COLOR_INFO_FORMAT__MASK				0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
 #define A2XX_RB_COLOR_INFO_BASE__SHIFT				12
 static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
 {
-	return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+	return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
 }
 
 #define REG_A2XX_RB_DEPTH_INFO					0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
 #define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT			12
 static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
 {
-	return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+	return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
 }
 
 #define REG_A2XX_A225_RB_COLOR_INFO3				0x00002005
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
 #define REG_A2XX_COHER_STATUS_PM4				0x00000a2b
 
 #define REG_A2XX_SQ_TEX_0					0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK				0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT				0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+	return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK				0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT				2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+	return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK				0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT				4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+	return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK				0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT				6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+	return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK				0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT				8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+	return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
 #define A2XX_SQ_TEX_0_CLAMP_X__MASK				0x00001c00
 #define A2XX_SQ_TEX_0_CLAMP_X__SHIFT				10
 static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
 {
 	return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
 }
-#define A2XX_SQ_TEX_0_PITCH__MASK				0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK				0x7fc00000
 #define A2XX_SQ_TEX_0_PITCH__SHIFT				22
 static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
 {
 	return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
 }
+#define A2XX_SQ_TEX_0_TILED					0x00000002
 
 #define REG_A2XX_SQ_TEX_1					0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK				0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT				0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+	return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK				0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT				6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+	return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK			0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT			8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED					0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK			0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT			11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+	return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK			0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT			12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+	return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
 
 #define REG_A2XX_SQ_TEX_2					0x00000002
 #define A2XX_SQ_TEX_2_WIDTH__MASK				0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
 {
 	return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
 }
+#define A2XX_SQ_TEX_2_DEPTH__MASK				0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT				26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
 
 #define REG_A2XX_SQ_TEX_3					0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK				0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT				0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+	return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
 #define A2XX_SQ_TEX_3_SWIZ_X__MASK				0x0000000e
 #define A2XX_SQ_TEX_3_SWIZ_X__SHIFT				1
 static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
 {
 	return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
 }
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK				0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT				13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
 #define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK			0x00180000
 #define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT			19
 static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
 {
 	return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
 }
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK				0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT				23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+	return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK			0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT			25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+	return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK				0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT			31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4					0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK			0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT			0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+	return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK			0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT			1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+	return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK			0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT			2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK			0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT			6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK				0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK				0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK				0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT				12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK			0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT			22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK			0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT			27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5					0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK			0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT			0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+	return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX				0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK				0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT				3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+	return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK				0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT				5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK				0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT				9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+	return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS				0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK				0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT			12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+	return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
 
 
 #endif /* A2XX_XML */
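All of the generated helpers above follow the same ((val << SHIFT) & MASK)
pattern, so register words are built by OR-ing fields together, and an
out-of-range value is masked off rather than spilling into neighbouring
fields. An illustrative composition (values chosen arbitrarily, using only
fields defined in this header):

	uint32_t sq_tex_3 =
		A2XX_SQ_TEX_3_XY_MAG_FILTER(SQ_TEX_FILTER_BILINEAR) |
		A2XX_SQ_TEX_3_XY_MIN_FILTER(SQ_TEX_FILTER_BILINEAR) |
		A2XX_SQ_TEX_3_MIP_FILTER(SQ_TEX_FILTER_POINT) |
		A2XX_SQ_TEX_3_ANISO_FILTER(SQ_TEX_ANISO_FILTER_DISABLED);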
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 0000000..1f83bc1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	OUT_PKT3(ring, CP_ME_INIT, 18);
+
+	/* All fields present (bits 9:0) */
+	OUT_RING(ring, 0x000003ff);
+	/* Disable/Enable Real-Time Stream processing (present but ignored) */
+	OUT_RING(ring, 0x00000000);
+	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+	OUT_RING(ring, 0x00000000);
+
+	OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+	OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+	OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+	OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+	/* Vertex and Pixel Shader Start Addresses in instructions
+	 * (3 DWORDS per instruction) */
+	OUT_RING(ring, 0x80000180);
+	/* Maximum Contexts */
+	OUT_RING(ring, 0x00000001);
+	/* Write Confirm Interval: the CP will wait
+	 * wait_interval * 16 clocks between polls */
+	OUT_RING(ring, 0x00000000);
+	/* NQ and External Memory Swap */
+	OUT_RING(ring, 0x00000000);
+	/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+	OUT_RING(ring, 0x200001f2);
+	/* Disable header dumping and Header dump address */
+	OUT_RING(ring, 0x00000000);
+	/* Header dump size */
+	OUT_RING(ring, 0x00000000);
+
+	/* enable protected mode */
+	OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	gpu->funcs->flush(gpu, ring);
+	return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	dma_addr_t pt_base, tran_error;
+	uint32_t *ptr, len;
+	int i, ret;
+
+	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+	DBG("%s", gpu->name);
+
+	/* halt ME to avoid ucode upload issues on a20x */
+	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+	/* note: kgsl uses 0x00000001 after first reset on a22x */
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+	msleep(30);
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+	if (adreno_is_a225(adreno_gpu))
+		gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+	/* note: kgsl uses 0x0000ffff for a20x */
+	gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+	/* MPU: physical range */
+	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+		A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+	/* same as parameters in adreno_gpu */
+	gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+		A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+	gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+	gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+		A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+		A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+		A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+		A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+	if (!adreno_is_a20x(adreno_gpu))
+		gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+	gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+	gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+	/* note: gsl doesn't set this */
+	gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+	gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+		A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+	gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+		AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+		AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+		AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+		AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+		AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+		AXXX_CP_INT_CNTL_IB1_INT_MASK |
+		AXXX_CP_INT_CNTL_RB_INT_MASK);
+	gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+	gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+		A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+		A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+		A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+	for (i = 3; i <= 5; i++)
+		if ((SZ_16K << i) == adreno_gpu->gmem)
+			break;
+	gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		return ret;
+
+	/* NOTE: PM4/micro-engine firmware registers look to be the same
+	 * for a2xx and a3xx.. we could possibly push that part down to
+	 * adreno_gpu base class.  Or push both PM4 and PFP but
+	 * parameterize the pfp ucode addr/data registers..
+	 */
+
+	/* Load PM4: */
+	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+	DBG("loading PM4 ucode version: %x", ptr[1]);
+
+	gpu_write(gpu, REG_AXXX_CP_DEBUG,
+			AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+	for (i = 1; i < len; i++)
+		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+	/* Load PFP: */
+	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+	DBG("loading PFP ucode version: %x", ptr[5]);
+
+	gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+	for (i = 1; i < len; i++)
+		gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+	gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+	/* clear ME_HALT to start micro engine */
+	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+	return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
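Worked decode of the MMU window programmed above (illustrative; this reads
NUM_64KB_REGIONS as the size of the range in 64 KiB units):

	/* SZ_16M | A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff):
	 *   VA base = 0x01000000 (16 MiB; the raw SZ_16M bits land in
	 *             the VA_BASE field)
	 *   range   = 0xfff * 64 KiB = 0x0fff0000 bytes (~256 MiB)
	 * so the GPU virtual range is roughly [16M, 16M + 256M), matching
	 * the "same as parameters in adreno_gpu" comment. */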
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+	int i;
+
+	adreno_dump_info(gpu);
+
+	for (i = 0; i < 8; i++) {
+		printk("CP_SCRATCH_REG%d: %u\n", i,
+			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+	}
+
+	/* dump registers before resetting gpu, if enabled: */
+	if (hang_debug)
+		a2xx_dump(gpu);
+
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+	gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+	adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+	DBG("%s", gpu->name);
+
+	adreno_gpu_cleanup(adreno_gpu);
+
+	kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+	/* wait for ringbuffer to drain: */
+	if (!adreno_idle(gpu, gpu->rb[0]))
+		return false;
+
+	/* then wait for GPU to finish: */
+	if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+			A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+		/* TODO maybe we need to reset GPU here to recover from hang? */
+		return false;
+	}
+
+	return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+	uint32_t mstatus, status;
+
+	mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+	if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+		status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+		dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+		dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+			gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+		gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+	}
+
+	if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+		status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+		/* only RB_INT is expected */
+		if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+			dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+		gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+	}
+
+	if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+		status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+		dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+		gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+	}
+
+	msm_gpu_retire(gpu);
+
+	return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+	0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+	0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+	0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+	0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+	0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+	0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+	0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+	0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+	0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+	0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+	0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+	0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+	~0   /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+	0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+	0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+	0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+	0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+	0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+	0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+	0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+	0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+	0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+	0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+	0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+	0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+	0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+	0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+	0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+	0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+	0x4900, 0x4900, 0x4908, 0x4908,
+	~0   /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+	0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+	0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+	0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+	0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+	0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+	0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+	0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+	0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+	0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+	0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+	0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+	0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+	0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+	0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+	0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+	0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+	0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+	0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+	0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+	0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+	0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+	0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+	0x4908, 0x4908,
+	~0   /* sentinel */
+};
+
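
These register tables all share one layout: a flat list of inclusive (start, end) register-offset pairs terminated by a ~0 sentinel, which is the form the common adreno dump/show code walks. A minimal sketch of such a walker, with read_reg() as a hypothetical stand-in for the real gpu_read() accessor:

	static void dump_ranges(const unsigned int *regs)
	{
		int i;

		/* each pair is an inclusive [start, end] range; ~0 terminates */
		for (i = 0; regs[i] != ~0U; i += 2) {
			unsigned int addr;

			for (addr = regs[i]; addr <= regs[i + 1]; addr++)
				pr_info("0x%04X: 0x%08x\n", addr, read_reg(addr));
		}
	}
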
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+	printk("status:   %08x\n",
+			gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+	adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+	struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	adreno_gpu_state_get(gpu, state);
+
+	state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+	return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+	.base = {
+		.get_param = adreno_get_param,
+		.hw_init = a2xx_hw_init,
+		.pm_suspend = msm_gpu_pm_suspend,
+		.pm_resume = msm_gpu_pm_resume,
+		.recover = a2xx_recover,
+		.submit = adreno_submit,
+		.flush = adreno_flush,
+		.active_ring = adreno_active_ring,
+		.irq = a2xx_irq,
+		.destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+		.show = adreno_show,
+#endif
+		.gpu_state_get = a2xx_gpu_state_get,
+		.gpu_state_put = adreno_gpu_state_put,
+	},
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+	struct a2xx_gpu *a2xx_gpu = NULL;
+	struct adreno_gpu *adreno_gpu;
+	struct msm_gpu *gpu;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	int ret;
+
+	if (!pdev) {
+		DRM_DEV_ERROR(dev->dev, "no a2xx device\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+	if (!a2xx_gpu) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	adreno_gpu = &a2xx_gpu->base;
+	gpu = &adreno_gpu->base;
+
+	gpu->perfcntrs = perfcntrs;
+	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+	adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+	if (ret)
+		goto fail;
+
+	/*
+	 * adreno_gpu->revn is not filled in until adreno_gpu_init() has run,
+	 * so the chip can only be identified afterwards.
+	 */
+	if (adreno_is_a20x(adreno_gpu))
+		adreno_gpu->registers = a200_registers;
+	else if (adreno_is_a225(adreno_gpu))
+		adreno_gpu->registers = a225_registers;
+	else
+		adreno_gpu->registers = a220_registers;
+
+	if (!gpu->aspace) {
+		DRM_DEV_ERROR(dev->dev, "No memory protection without MMU\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	return gpu;
+
+fail:
+	if (a2xx_gpu)
+		a2xx_destroy(&a2xx_gpu->base.base);
+
+	return ERR_PTR(ret);
+}
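
a2xx_gpu_init() reports failure through the returned pointer itself, so callers use the standard ERR_PTR() idiom rather than checking for NULL. A caller-side sketch (the surrounding probe logic is assumed, not part of this patch):

	struct msm_gpu *gpu = a2xx_gpu_init(dev);

	if (IS_ERR(gpu))
		return PTR_ERR(gpu);	/* e.g. -ENXIO or -ENOMEM from above */
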
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 0000000..02fba2c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+	struct adreno_gpu base;
+	bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
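
to_a2xx_gpu() is the usual container_of() downcast; it is valid only because struct adreno_gpu is embedded by value as the base member. For illustration, the macro conceptually expands to:

	/* illustration only: recover the wrapper from the embedded base */
	struct a2xx_gpu *a2xx_gpu = (struct a2xx_gpu *)
		((char *)adreno_gpu - offsetof(struct a2xx_gpu, base));
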
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a89f7bb..17059f2 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4..c3b4bc6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "no a3xx device\n");
+		DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
 		ret = -ENXIO;
 		goto fail;
 	}
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 		 * to not be possible to restrict access, then we must
 		 * implement a cmdstream validator.
 		 */
-		dev_err(dev->dev, "No memory protection without IOMMU\n");
+		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 		ret = -ENXIO;
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 858690f..9b51e25 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7c4e6dc..18f9a8e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "no a4xx device\n");
+		DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
 		ret = -ENXIO;
 		goto fail;
 	}
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 		 * to not be possible to restrict access, then we must
 		 * implement a cmdstream validator.
 		 */
-		dev_err(dev->dev, "No memory protection without IOMMU\n");
+		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 		ret = -ENXIO;
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index b4944cc..cf4fe14 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index d2127b1..d9af3af 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
 	adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
 
 	if (a5xx_gpu->pm4_bo) {
-		if (a5xx_gpu->pm4_iova)
-			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
 		drm_gem_object_put(a5xx_gpu->pm4_bo);
 		a5xx_gpu->pm4_bo = NULL;
 	}
 
 	if (a5xx_gpu->pfp_bo) {
-		if (a5xx_gpu->pfp_iova)
-			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
 		drm_gem_object_put(a5xx_gpu->pfp_bo);
 		a5xx_gpu->pfp_bo = NULL;
 	}
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
 			minor->debugfs_root, minor);
 
 	if (ret) {
-		dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+		DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
 		return ret;
 	}
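
The reset path above can drop the if (...->pm4_iova) guards because msm_gem_unpin_iova(), the rename of msm_gem_put_iova() in this series, is treated as safe to call unconditionally on a tracked object. The resulting teardown idiom, repeated throughout the a5xx changes below:

	if (bo) {
		msm_gem_unpin_iova(bo, gpu->aspace);	/* drop the GPU mapping pin */
		drm_gem_object_put(bo);			/* drop the object reference */
		bo = NULL;
	}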
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 48b5304..d5f5e56 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -20,7 +20,6 @@
 #include <linux/soc/qcom/mdt_loader.h>
 #include <linux/pm_opp.h>
 #include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
 #include <linux/slab.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
 		a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
 			adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
 
+
 		if (IS_ERR(a5xx_gpu->pm4_bo)) {
 			ret = PTR_ERR(a5xx_gpu->pm4_bo);
 			a5xx_gpu->pm4_bo = NULL;
-			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
 				ret);
 			return ret;
 		}
+
+		msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
 	}
 
 	if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
 		if (IS_ERR(a5xx_gpu->pfp_bo)) {
 			ret = PTR_ERR(a5xx_gpu->pfp_bo);
 			a5xx_gpu->pfp_bo = NULL;
-			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
 				ret);
 			return ret;
 		}
+
+		msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
 	}
 
 	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	a5xx_preempt_fini(gpu);
 
 	if (a5xx_gpu->pm4_bo) {
-		if (a5xx_gpu->pm4_iova)
-			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
 	}
 
 	if (a5xx_gpu->pfp_bo) {
-		if (a5xx_gpu->pfp_iova)
-			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
-		if (a5xx_gpu->gpmu_iova)
-			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
 	}
 
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 
-	dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+	DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
 		ring ? ring->id : -1, ring ? ring->seqno : 0,
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
 		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
 
 static void a5xx_dump(struct msm_gpu *gpu)
 {
-	dev_info(gpu->dev->dev, "status:   %08x\n",
+	DRM_DEV_INFO(gpu->dev->dev, "status:   %08x\n",
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
 	u32 *hlsqregs;
 };
 
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
-	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
-		interval, timeout)
-
 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
 		struct a5xx_crashdumper *dumper)
 {
@@ -1222,19 +1219,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
 		SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
 		&dumper->bo, &dumper->iova);
 
-	if (IS_ERR(dumper->ptr))
-		return PTR_ERR(dumper->ptr);
+	if (!IS_ERR(dumper->ptr))
+		msm_gem_object_set_name(dumper->bo, "crashdump");
 
-	return 0;
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
-		struct a5xx_crashdumper *dumper)
-{
-	msm_gem_put_iova(dumper->bo, gpu->aspace);
-	msm_gem_put_vaddr(dumper->bo);
-
-	drm_gem_object_put(dumper->bo);
+	return PTR_ERR_OR_ZERO(dumper->ptr);
 }
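
PTR_ERR_OR_ZERO() returns the errno for an ERR_PTR and 0 for a valid pointer, so the rewritten a5xx_crashdumper_init() above is equivalent to this open-coded form:

	if (IS_ERR(dumper->ptr))
		return PTR_ERR(dumper->ptr);

	msm_gem_object_set_name(dumper->bo, "crashdump");
	return 0;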
 
 static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1329,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 
 	if (a5xx_crashdumper_run(gpu, &dumper)) {
 		kfree(a5xx_state->hlsqregs);
-		a5xx_crashdumper_free(gpu, &dumper);
+		msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
 		return;
 	}
 
@@ -1337,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 	memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
 		count * sizeof(u32));
 
-	a5xx_crashdumper_free(gpu, &dumper);
+	msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
 }
 
 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1508,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "No A5XX device is defined\n");
+		DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
 		return ERR_PTR(-ENXIO);
 	}
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7a41e1c..70e65c9 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
 		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
 	if (IS_ERR(ptr))
-		goto err;
+		return;
+
+	msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
 
 	while (cmds_size > 0) {
 		int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 
 	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
 	a5xx_gpu->gpmu_dwords = dwords;
-
-	return;
-err:
-	if (a5xx_gpu->gpmu_iova)
-		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
-	if (a5xx_gpu->gpmu_bo)
-		drm_gem_object_put(a5xx_gpu->gpmu_bo);
-
-	a5xx_gpu->gpmu_bo = NULL;
-	a5xx_gpu->gpmu_iova = 0;
-	a5xx_gpu->gpmu_dwords = 0;
 }
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 4c357ea..3d62310 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
 	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
 		return;
 
-	dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+	DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
 	queue_work(priv->wq, &gpu->recover_work);
 }
 
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
 	status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
 	if (unlikely(status)) {
 		set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
-		dev_err(dev->dev, "%s: Preemption failed to complete\n",
+		DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
 			gpu->name);
 		queue_work(priv->wq, &gpu->recover_work);
 		return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
+	msm_gem_object_set_name(bo, "preempt");
+
 	a5xx_gpu->preempt_bo[ring->id] = bo;
 	a5xx_gpu->preempt_iova[ring->id] = iova;
 	a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
 	int i;
 
-	for (i = 0; i < gpu->nr_rings; i++) {
-		if (!a5xx_gpu->preempt_bo[i])
-			continue;
-
-		msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
-		if (a5xx_gpu->preempt_iova[i])
-			msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
-		drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
-		a5xx_gpu->preempt_bo[i] = NULL;
-	}
+	for (i = 0; i < gpu->nr_rings; i++)
+		msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
 }
 
 void a5xx_preempt_init(struct msm_gpu *gpu)
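
a5xx_preempt_fini() now funnels each ring through msm_gem_kernel_put(), which, judging from the call sites in this patch, bundles the vaddr put, iova unpin and object put that the removed loop did by hand, and must tolerate a NULL bo since a preempt_bo[] slot may never have been allocated. A sketch of that shape, inferred from usage rather than taken from the implementation:

	void msm_gem_kernel_put(struct drm_gem_object *bo,
			struct msm_gem_address_space *aspace, bool locked)
	{
		if (!bo)
			return;

		msm_gem_put_vaddr(bo);
		msm_gem_unpin_iova(bo, aspace);

		if (locked)
			drm_gem_object_put(bo);
		else
			drm_gem_object_put_unlocked(bo);
	}
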
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index a6f7c40..f44553e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
 	PERF_VFDP_VS_STAGE_WAVES = 22,
 };
 
-enum a6xx_hslq_perfcounter_select {
+enum a6xx_hlsq_perfcounter_select {
 	PERF_HLSQ_BUSY_CYCLES = 0,
 	PERF_HLSQ_STALL_CYCLES_UCHE = 1,
 	PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
 #define A6XX_GRAS_LRZ_CNTL_ENABLE				0x00000001
 #define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE				0x00000002
 #define A6XX_GRAS_LRZ_CNTL_GREATER				0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3					0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4					0x00000010
 
 #define REG_A6XX_GRAS_UNKNOWN_8101				0x00008101
 
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
 #define REG_A6XX_GRAS_UNKNOWN_8110				0x00008110
 
 #define REG_A6XX_GRAS_2D_BLIT_CNTL				0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK		0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT		8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+	return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR				0x00010000
 
 #define REG_A6XX_GRAS_2D_SRC_TL_X				0x00008401
 #define A6XX_GRAS_2D_SRC_TL_X_X__MASK				0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
 	return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
 }
 #define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND			0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE			0x00000400
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK			0xffff0000
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT			16
 static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 #define REG_A6XX_RB_SAMPLE_COUNT_CONTROL			0x00008891
 #define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY			0x00000002
 
+#define REG_A6XX_RB_LRZ_CNTL					0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE					0x00000001
+
 #define REG_A6XX_RB_UNKNOWN_88D0				0x000088d0
 
 #define REG_A6XX_RB_BLIT_SCISSOR_TL				0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
 	return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
 }
 
+#define REG_A6XX_RB_MSAA_CNTL					0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK				0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT			3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
 #define REG_A6XX_RB_BLIT_BASE_GMEM				0x000088d6
 
 #define REG_A6XX_RB_BLIT_DST_INFO				0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
 	return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
 }
 #define A6XX_RB_BLIT_DST_INFO_FLAGS				0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK			0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT			3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK		0x00007f80
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT		7
 static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
 {
 	return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
 }
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR				0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01				0x00008c01
 
 #define REG_A6XX_RB_2D_DST_INFO					0x00008c17
 #define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK			0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_BLEND_CNTL					0x0000a989
 #define A6XX_SP_BLEND_CNTL_ENABLED				0x00000001
 #define A6XX_SP_BLEND_CNTL_UNK8					0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE			0x00000400
 
 #define REG_A6XX_SP_SRGB_CNTL					0x0000a98a
 #define A6XX_SP_SRGB_CNTL_SRGB_MRT0				0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
 
 #define REG_A6XX_SP_UNKNOWN_AB20				0x0000ab20
 
+#define REG_A6XX_SP_UNKNOWN_ACC0				0x0000acc0
+
 #define REG_A6XX_SP_UNKNOWN_AE00				0x0000ae00
 
 #define REG_A6XX_SP_UNKNOWN_AE03				0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
 	return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
 }
 #define A6XX_SP_PS_2D_SRC_INFO_FLAGS				0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER				0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE				0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK			0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT			0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK			0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT			15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
 
 #define REG_A6XX_SP_PS_2D_SRC_LO				0x0000b4c2
 
 #define REG_A6XX_SP_PS_2D_SRC_HI				0x0000b4c3
 
+#define REG_A6XX_SP_PS_2D_SRC_PITCH				0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK			0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT			9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
 #define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO				0x0000b4ca
 
 #define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI				0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
 {
 	return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A6XX_TEX_CONST_0_SAMPLES__MASK				0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT				20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
 #define A6XX_TEX_CONST_0_FMT__MASK				0x3fc00000
 #define A6XX_TEX_CONST_0_FMT__SHIFT				22
 static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
 
 #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2			0x00000030
 
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0			0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1			0x00000002
+
 
 #endif /* A6XX_XML */
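
Every field in these generated headers follows the same MASK/SHIFT/inline-packer pattern, so register values are composed by OR-ing the accessors together. For example, programming 4x MSAA through the new RB_MSAA_CNTL definitions might look like this (a usage sketch; MSAA_FOUR is an enum a3xx_msaa_samples value assumed from the parameter type):

	gpu_write(gpu, REG_A6XX_RB_MSAA_CNTL,
		A6XX_RB_MSAA_CNTL_SAMPLES(MSAA_FOUR));
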
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d4e98e5..c58e953 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-/* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
 {
-	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+	u32 val;
+
+	/* This can be called from gpu state code so make sure GMU is valid */
+	if (IS_ERR_OR_NULL(gmu->mmio))
+		return false;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+	return !(val &
+		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
+/* Check to see if the GX rail is still powered */
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+	u32 val;
+
+	/* This can be called from gpu state code so make sure GMU is valid */
+	if (IS_ERR_OR_NULL(gmu->mmio))
+		return false;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
 
 	return !(val &
 		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 		val == 0xbabeface, 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 
 	return ret;
 }
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
 		val & 1, 100, 10000);
 	if (ret)
-		dev_err(gmu->dev, "Unable to start the HFI queues\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
 
 	return ret;
 }
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 		val & (1 << ack), 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"Timeout waiting for GMU OOB set %s: 0x%x\n",
 				name,
 				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 		(val & 0x38) == 0x28, 1, 100);
 
 	if (ret) {
-		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 	}
 
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 		(val & 0x04), 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 }
 
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 		/* Check to see if the GMU really did slumber */
 		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
 			!= 0x0f) {
-			dev_err(gmu->dev, "The GMU did not go into slumber\n");
+			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
 			ret = -ETIMEDOUT;
 		}
 	}
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 		val & (1 << 1), 100, 10000);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
 		return ret;
 	}
 
 	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
 		!val, 100, 10000);
 
-	if (!ret) {
-		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
-		/* Re-enable the power counter */
-		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
-		return 0;
+	if (ret) {
+		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+		return ret;
 	}
 
-	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
-	return ret;
+	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+	/* Set up CX GMU counter 0 to count busy ticks */
+	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+	/* Enable the power counter */
+	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+	return 0;
 }
 
 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 		val, val & (1 << 16), 100, 10000);
 	if (ret)
-		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 }
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 
 		/* Sanity check the size of the firmware that was loaded */
 		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"GMU firmware is bigger than the available region\n");
 			return -EINVAL;
 		}
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 		 */
 
 		if (ret)
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"Unable to slumber GMU: status = 0%x/0%x\n",
 				gmu_read(gmu,
 					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 			IOMMU_READ | IOMMU_WRITE);
 
 		if (ret) {
-			dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
 
 			for (i = i - 1 ; i >= 0; i--)
 				iommu_unmap(gmu->domain,
@@ -969,12 +994,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 		}
 
 		if (j == pri_count) {
-			dev_err(dev,
-				"Level %u not found in in the RPMh list\n",
+			DRM_DEV_ERROR(dev,
+				"Level %u not found in the RPMh list\n",
 					level);
-			dev_err(dev, "Available levels:\n");
+			DRM_DEV_ERROR(dev, "Available levels:\n");
 			for (j = 0; j < pri_count; j++)
-				dev_err(dev, "  %u\n", pri[j]);
+				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
 
 			return -EINVAL;
 		}
@@ -1081,7 +1106,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
 	 */
 	ret = dev_pm_opp_of_add_table(gmu->dev);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
 		return ret;
 	}
 
@@ -1122,13 +1147,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 			IORESOURCE_MEM, name);
 
 	if (!res) {
-		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
 	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (!ret) {
-		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1145,7 +1170,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
 	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
 		name, gmu);
 	if (ret) {
-		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 35f765a..c721d91 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
 void a6xx_hfi_stop(struct a6xx_gmu *gmu);
 
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
 #endif
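
Both newly exported predicates guard gmu->mmio with IS_ERR_OR_NULL() because the gpu-state code can run after a failed or absent GMU probe. The gmu_poll_timeout() helper used throughout a6xx_gmu.c presumably mirrors the gpu_poll_timeout() macro removed from a5xx_gpu.c above, i.e. a thin wrapper over readl_poll_timeout():

	#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
		readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
			interval, timeout)
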
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index db56f26..1cc1c13 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 631257c..fefe773 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -4,6 +4,7 @@
 
 #include "msm_gem.h"
 #include "msm_mmu.h"
+#include "msm_gpu_trace.h"
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
 
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
 }
 
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+		u64 iova)
+{
+	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+	OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+	OUT_RING(ring, lower_32_bits(iova));
+	OUT_RING(ring, upper_32_bits(iova));
+}
+
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)
 {
+	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
 	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
+	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+		rbmemptr_stats(ring, index, cpcycles_start));
+
+	/*
+	 * For PM4, the GMU register offsets are calculated from the base of
+	 * the GPU registers, so we need to add 0x1a800 to the register value
+	 * on A630 to get the right value from PM4.
+	 */
+	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+		rbmemptr_stats(ring, index, alwayson_start));
+
 	/* Invalidate CCU depth and color */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 	OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		}
 	}
 
+	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+		rbmemptr_stats(ring, index, cpcycles_end));
+	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+		rbmemptr_stats(ring, index, alwayson_end));
+
 	/* Write the fence to the scratch register */
 	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
 	OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 	OUT_RING(ring, submit->seqno);
 
+	trace_msm_gpu_submit_flush(submit,
+		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
 	a6xx_flush(gpu, ring);
 }
 
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
 
 			return ret;
 		}
+
+		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
 	}
 
 	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-	/* FIXME: not sure if this should live here or in a6xx_gmu.c */
-	gmu_write(&a6xx_gpu->gmu,  REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
-		0xff000000);
-	gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
-		0xff, 0x20);
-	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
-		0x01);
-
 	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 
 static void a6xx_dump(struct msm_gpu *gpu)
 {
-	dev_info(&gpu->pdev->dev, "status:   %08x\n",
+	DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
 			gpu_read(gpu, REG_A6XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 	adreno_dump_info(gpu);
 
 	for (i = 0; i < 8; i++)
-		dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
 
 	if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-static const u32 a6xx_registers[] = {
-	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
-	0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
-	0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
-	0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
-	0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
-	0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
-	0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
-	0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
-	0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
-	0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
-	0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
-	0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
-	0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
-	0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
-	0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
-	0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
-	0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
-	0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
-	0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
-	0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
-	0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
-	0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
-	0xa610, 0xa617, 0xa630, 0xa630,
-	~0
-};
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 	return 0;
 }
 
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
-		struct drm_printer *p)
-{
-	adreno_show(gpu, state, p);
-}
-#endif
-
 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
 	if (a6xx_gpu->sqe_bo) {
-		if (a6xx_gpu->sqe_iova)
-			msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
 	}
 
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_busy = a6xx_gpu_busy,
 		.gpu_get_freq = a6xx_gmu_get_freq,
 		.gpu_set_freq = a6xx_gmu_set_freq,
+		.gpu_state_get = a6xx_gpu_state_get,
+		.gpu_state_put = a6xx_gpu_state_put,
 	},
 	.get_timestamp = a6xx_get_timestamp,
 };
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	adreno_gpu = &a6xx_gpu->base;
 	gpu = &adreno_gpu->base;
 
-	adreno_gpu->registers = a6xx_registers;
+	adreno_gpu->registers = NULL;
 	adreno_gpu->reg_offsets = a6xx_register_offsets;
 
 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4127dce..528a4cf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
 
 int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+		struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
 #endif /* __A6XX_GPU_H__ */
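
The a6xx submit path now brackets the IBs with CP_REG_TO_MEM samples of the CP cycle counter and the GMU always-on counter, landing in a per-submit stats slot selected by submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT. Once the fence signals, a consumer can difference the slot; a sketch, with field names taken from the rbmemptr_stats() calls above:

	u64 cp_cycles = stats->cpcycles_end - stats->cpcycles_start;
	u64 ao_ticks = stats->alwayson_end - stats->alwayson_start;
	/* converting ticks to time needs the always-on counter rate;
	 * 19.2 MHz is assumed here, matching the usual SoC reference clock */
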
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 0000000..e686331
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+	const void *handle;
+	u32 *data;
+};
+
+struct a6xx_gpu_state {
+	struct msm_gpu_state base;
+
+	struct a6xx_gpu_state_obj *gmu_registers;
+	int nr_gmu_registers;
+
+	struct a6xx_gpu_state_obj *registers;
+	int nr_registers;
+
+	struct a6xx_gpu_state_obj *shaders;
+	int nr_shaders;
+
+	struct a6xx_gpu_state_obj *clusters;
+	int nr_clusters;
+
+	struct a6xx_gpu_state_obj *dbgahb_clusters;
+	int nr_dbgahb_clusters;
+
+	struct a6xx_gpu_state_obj *indexed_regs;
+	int nr_indexed_regs;
+
+	struct a6xx_gpu_state_obj *debugbus;
+	int nr_debugbus;
+
+	struct a6xx_gpu_state_obj *vbif_debugbus;
+
+	struct a6xx_gpu_state_obj *cx_debugbus;
+	int nr_cx_debugbus;
+
+	struct list_head objs;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+	in[0] = val;
+	in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+	return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+	in[0] = target;
+	in[1] = (((u64) reg) << 44 | dwords);
+
+	return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+	in[0] = 0;
+	in[1] = 0;
+
+	return 2;
+}
+
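
Each of the helpers above emits one two-qword crashdumper instruction: qword 0 holds the payload (a value to write, or the target iova for a read) and qword 1 packs the register offset into the top bits, with bit 21 flagging a write and the low bits giving a read's dword count; an all-zero pair terminates the script. A toy script built with these helpers (the register choices are illustrative only):

	u64 *ptr = dumper->ptr;

	ptr += CRASHDUMP_WRITE(ptr, REG_A6XX_HLSQ_DBG_READ_SEL, 0);
	ptr += CRASHDUMP_READ(ptr, REG_A6XX_CP_SCRATCH_REG(0), 4,
		dumper->iova + A6XX_CD_DATA_OFFSET);
	CRASHDUMP_FINI(ptr);
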
+struct a6xx_crashdumper {
+	void *ptr;
+	struct drm_gem_object *bo;
+	u64 iova;
+};
+
+struct a6xx_state_memobj {
+	struct list_head node;
+	unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+	struct a6xx_state_memobj *obj =
+		kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+	if (!obj)
+		return NULL;
+
+	list_add_tail(&obj->node, &a6xx_state->objs);
+	return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+		size_t size)
+{
+	void *dst = state_kcalloc(a6xx_state, 1, size);
+
+	if (dst)
+		memcpy(dst, src, size);
+	return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE  (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+		struct a6xx_crashdumper *dumper)
+{
+	dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+		SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+		&dumper->bo, &dumper->iova);
+
+	if (!IS_ERR(dumper->ptr))
+		msm_gem_object_set_name(dumper->bo, "crashdump");
+
+	return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+		struct a6xx_crashdumper *dumper)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	u32 val;
+	int ret;
+
+	if (IS_ERR_OR_NULL(dumper->ptr))
+		return -EINVAL;
+
+	if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+		return -EINVAL;
+
+	/* Make sure all pending memory writes are posted */
+	wmb();
+
+	gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+		REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+	ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+		val & 0x02, 100, 10000);
+
+	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+	return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+		u32 *data)
+{
+	u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+		A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+	/* Wait 1 us to make sure the data is flowing */
+	udelay(1);
+
+	data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+	data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+	return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+	msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+	msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+		u32 *data)
+{
+	u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+		A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+	/* Wait 1 us to make sure the data is flowing */
+	udelay(1);
+
+	data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+	data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+	return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+		u32 reg, int count, u32 *data)
+{
+	int i;
+
+	gpu_write(gpu, ctrl0, reg);
+
+	for (i = 0; i < count; i++) {
+		gpu_write(gpu, ctrl1, i);
+		data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+	}
+
+	return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+	((16 * AXI_ARB_BLOCKS) + \
+	 (18 * XIN_AXI_BLOCKS) + \
+	 (12 * XIN_CORE_BLOCKS))
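
(With the block counts above, one VBIF dump is (16 * 2) + (18 * 5) + (12 * 4) = 170 u32 values.)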
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		struct a6xx_gpu_state_obj *obj)
+{
+	u32 clk, *ptr;
+	int i;
+
+	obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+		sizeof(u32));
+	if (!obj->data)
+		return;
+
+	obj->handle = NULL;
+
+	/* Get the current clock setting */
+	clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+	/* Force on the bus so we can read it */
+	gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+		clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+	/* We will read from BUS2 first, so disable BUS1 */
+	gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+	/* Enable the VBIF bus for reading */
+	gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+	ptr = obj->data;
+
+	for (i = 0; i < AXI_ARB_BLOCKS; i++)
+		ptr += vbif_debugbus_read(gpu,
+			REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+			REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+			1 << (i + 16), 16, ptr);
+
+	for (i = 0; i < XIN_AXI_BLOCKS; i++)
+		ptr += vbif_debugbus_read(gpu,
+			REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+			REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+			1 << i, 18, ptr);
+
+	/* Stop BUS2 so we can turn on BUS1 */
+	gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+	for (i = 0; i < XIN_CORE_BLOCKS; i++)
+		ptr += vbif_debugbus_read(gpu,
+			REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+			REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+			1 << i, 12, ptr);
+
+	/* Restore the VBIF clock setting */
+	gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_debugbus_block *block,
+		struct a6xx_gpu_state_obj *obj)
+{
+	int i;
+	u32 *ptr;
+
+	obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+	if (!obj->data)
+		return;
+
+	obj->handle = block;
+
+	for (ptr = obj->data, i = 0; i < block->count; i++)
+		ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_debugbus_block *block,
+		struct a6xx_gpu_state_obj *obj)
+{
+	int i;
+	u32 *ptr;
+
+	obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+	if (!obj->data)
+		return;
+
+	obj->handle = block;
+
+	for (ptr = obj->data, i = 0; i < block->count; i++)
+		ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state)
+{
+	struct resource *res;
+	void __iomem *cxdbg = NULL;
+
+	/* Set up the GX debug bus */
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+		A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+		A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+	/* Set up the CX debug bus - it lives elsewhere in the system so do a
+	 * temporary ioremap for the registers
+	 */
+	res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+			"cx_dbgc");
+
+	if (res)
+		cxdbg = ioremap(res->start, resource_size(res));
+
+	if (cxdbg) {
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+			A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+			A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+			0x76543210);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+			0xFEDCBA98);
+
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+		cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+	}
+
+	a6xx_state->debugbus = state_kcalloc(a6xx_state,
+		ARRAY_SIZE(a6xx_debugbus_blocks),
+		sizeof(*a6xx_state->debugbus));
+
+	if (a6xx_state->debugbus) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+			a6xx_get_debugbus_block(gpu,
+				a6xx_state,
+				&a6xx_debugbus_blocks[i],
+				&a6xx_state->debugbus[i]);
+
+		a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+	}
+
+	a6xx_state->vbif_debugbus =
+		state_kcalloc(a6xx_state, 1,
+			sizeof(*a6xx_state->vbif_debugbus));
+
+	if (a6xx_state->vbif_debugbus)
+		a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+			a6xx_state->vbif_debugbus);
+
+	if (cxdbg) {
+		a6xx_state->cx_debugbus =
+			state_kcalloc(a6xx_state,
+			ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+			sizeof(*a6xx_state->cx_debugbus));
+
+		if (a6xx_state->cx_debugbus) {
+			int i;
+
+			for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+				a6xx_get_cx_debugbus_block(cxdbg,
+					a6xx_state,
+					&a6xx_cx_debugbus_blocks[i],
+					&a6xx_state->cx_debugbus[i]);
+
+			a6xx_state->nr_cx_debugbus =
+				ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+		}
+
+		iounmap(cxdbg);
+	}
+}
+
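+/*
+ * Register lists are stored as { start, end } pairs of dword offsets;
+ * RANGE() returns the inclusive number of registers in pair 'a'.
+ */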
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_dbgahb_cluster *dbgahb,
+		struct a6xx_gpu_state_obj *obj,
+		struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+	size_t datasize;
+	int i, regcount = 0;
+
+	for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+		int j;
+
+		in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+			(dbgahb->statetype + i * 2) << 8);
+
+		for (j = 0; j < dbgahb->count; j += 2) {
+			int count = RANGE(dbgahb->registers, j);
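+			/*
+			 * Registers are absolute dword offsets; rebase them
+			 * against the block's AHB aperture base (a byte
+			 * address, hence the >> 2).
+			 */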
+			u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+				dbgahb->registers[j] - (dbgahb->base >> 2);
+
+			in += CRASHDUMP_READ(in, offset, count, out);
+
+			out += count * sizeof(u32);
+
+			if (i == 0)
+				regcount += count;
+		}
+	}
+
+	CRASHDUMP_FINI(in);
+
+	datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+	if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+		return;
+
+	if (a6xx_crashdumper_run(gpu, dumper))
+		return;
+
+	obj->handle = dbgahb;
+	obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+		datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		struct a6xx_crashdumper *dumper)
+{
+	int i;
+
+	a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+		ARRAY_SIZE(a6xx_dbgahb_clusters),
+		sizeof(*a6xx_state->dbgahb_clusters));
+
+	if (!a6xx_state->dbgahb_clusters)
+		return;
+
+	a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+		a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+			&a6xx_dbgahb_clusters[i],
+			&a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_cluster *cluster,
+		struct a6xx_gpu_state_obj *obj,
+		struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+	size_t datasize;
+	int i, regcount = 0;
+
+	/* Some clusters need a selector register to be programmed too */
+	if (cluster->sel_reg)
+		in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+	for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+		int j;
+
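+		/* Select the target cluster and context via the CP aperture */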
+		in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+			(cluster->id << 8) | (i << 4) | i);
+
+		for (j = 0; j < cluster->count; j += 2) {
+			int count = RANGE(cluster->registers, j);
+
+			in += CRASHDUMP_READ(in, cluster->registers[j],
+				count, out);
+
+			out += count * sizeof(u32);
+
+			if (i == 0)
+				regcount += count;
+		}
+	}
+
+	CRASHDUMP_FINI(in);
+
+	datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+	if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+		return;
+
+	if (a6xx_crashdumper_run(gpu, dumper))
+		return;
+
+	obj->handle = cluster;
+	obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+		datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		struct a6xx_crashdumper *dumper)
+{
+	int i;
+
+	a6xx_state->clusters = state_kcalloc(a6xx_state,
+		ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+	if (!a6xx_state->clusters)
+		return;
+
+	a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+		a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+			&a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_shader_block *block,
+		struct a6xx_gpu_state_obj *obj,
+		struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+	size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+	int i;
+
+	if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+		return;
+
+	for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+		in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+			(block->type << 8) | i);
+
+		in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+			block->size, out);
+
+		/* Advance so each bank lands at its own offset, matching the
+		 * per-bank indexing in a6xx_show_shader()
+		 */
+		out += block->size * sizeof(u32);
+	}
+
+	CRASHDUMP_FINI(in);
+
+	if (a6xx_crashdumper_run(gpu, dumper))
+		return;
+
+	obj->handle = block;
+	obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+		datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		struct a6xx_crashdumper *dumper)
+{
+	int i;
+
+	a6xx_state->shaders = state_kcalloc(a6xx_state,
+		ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+	if (!a6xx_state->shaders)
+		return;
+
+	a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+		a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+			&a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_registers *regs,
+		struct a6xx_gpu_state_obj *obj,
+		struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+	int i, regcount = 0;
+
+	in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+	for (i = 0; i < regs->count; i += 2) {
+		u32 count = RANGE(regs->registers, i);
+		u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+			regs->registers[i] - (regs->val0 >> 2);
+
+		in += CRASHDUMP_READ(in, offset, count, out);
+
+		out += count * sizeof(u32);
+		regcount += count;
+	}
+
+	CRASHDUMP_FINI(in);
+
+	if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+		return;
+
+	if (a6xx_crashdumper_run(gpu, dumper))
+		return;
+
+	obj->handle = regs;
+	obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+		regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_registers *regs,
+		struct a6xx_gpu_state_obj *obj,
+		struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+	int i, regcount = 0;
+
+	/* Some blocks might need to program a selector register first */
+	if (regs->val0)
+		in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+	for (i = 0; i < regs->count; i += 2) {
+		u32 count = RANGE(regs->registers, i);
+
+		in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+		out += count * sizeof(u32);
+		regcount += count;
+	}
+
+	CRASHDUMP_FINI(in);
+
+	if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+		return;
+
+	if (a6xx_crashdumper_run(gpu, dumper))
+		return;
+
+	obj->handle = regs;
+	obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+		regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_registers *regs,
+		struct a6xx_gpu_state_obj *obj)
+{
+	int i, regcount = 0, index = 0;
+
+	for (i = 0; i < regs->count; i += 2)
+		regcount += RANGE(regs->registers, i);
+
+	obj->handle = (const void *) regs;
+	obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+	if (!obj->data)
+		return;
+
+	for (i = 0; i < regs->count; i += 2) {
+		u32 count = RANGE(regs->registers, i);
+		int j;
+
+		for (j = 0; j < count; j++)
+			obj->data[index++] = gpu_read(gpu,
+				regs->registers[i] + j);
+	}
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_registers *regs,
+		struct a6xx_gpu_state_obj *obj)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	int i, regcount = 0, index = 0;
+
+	for (i = 0; i < regs->count; i += 2)
+		regcount += RANGE(regs->registers, i);
+
+	obj->handle = (const void *) regs;
+	obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+	if (!obj->data)
+		return;
+
+	for (i = 0; i < regs->count; i += 2) {
+		u32 count = RANGE(regs->registers, i);
+		int j;
+
+		for (j = 0; j < count; j++)
+			obj->data[index++] = gmu_read(gmu,
+				regs->registers[i] + j);
+	}
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+	a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+		2, sizeof(*a6xx_state->gmu_registers));
+
+	if (!a6xx_state->gmu_registers)
+		return;
+
+	a6xx_state->nr_gmu_registers = 2;
+
+	/* Get the CX GMU registers from AHB */
+	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+		&a6xx_state->gmu_registers[0]);
+
+	if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+		return;
+
+	/* Set the fence to ALLOW mode so we can access the registers */
+	gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+		&a6xx_state->gmu_registers[1]);
+}
+
+static void a6xx_get_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		struct a6xx_crashdumper *dumper)
+{
+	int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+		ARRAY_SIZE(a6xx_reglist) +
+		ARRAY_SIZE(a6xx_hlsq_reglist);
+	int index = 0;
+
+	a6xx_state->registers = state_kcalloc(a6xx_state,
+		count, sizeof(*a6xx_state->registers));
+
+	if (!a6xx_state->registers)
+		return;
+
+	a6xx_state->nr_registers = count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+		a6xx_get_ahb_gpu_registers(gpu,
+			a6xx_state, &a6xx_ahb_reglist[i],
+			&a6xx_state->registers[index++]);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+		a6xx_get_crashdumper_registers(gpu,
+			a6xx_state, &a6xx_reglist[i],
+			&a6xx_state->registers[index++],
+			dumper);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+		a6xx_get_crashdumper_hlsq_registers(gpu,
+			a6xx_state, &a6xx_hlsq_reglist[i],
+			&a6xx_state->registers[index++],
+			dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state,
+		const struct a6xx_indexed_registers *indexed,
+		struct a6xx_gpu_state_obj *obj)
+{
+	int i;
+
+	obj->handle = (const void *) indexed;
+	obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+	if (!obj->data)
+		return;
+
+	/* All the indexed banks start at address 0 */
+	gpu_write(gpu, indexed->addr, 0);
+
+	/* Read the data - each read increments the internal address by 1 */
+	for (i = 0; i < indexed->count; i++)
+		obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+		struct a6xx_gpu_state *a6xx_state)
+{
+	u32 mempool_size;
+	int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+	int i;
+
+	a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+		sizeof(*a6xx_state->indexed_regs));
+	if (!a6xx_state->indexed_regs)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+		a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+			&a6xx_state->indexed_regs[i]);
+
+	/* Set the CP mempool size to 0 to stabilize it while dumping */
+	mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+	/* Get the contents of the CP mempool */
+	a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+		&a6xx_state->indexed_regs[i]);
+
+	/*
+	 * Offset 0x2000 in the mempool is the size - copy the saved size over
+	 * so the data is consistent
+	 */
+	a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+	/* Restore the size in the hardware */
+	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+	a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+	struct a6xx_crashdumper dumper = { 0 };
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+		GFP_KERNEL);
+
+	if (!a6xx_state)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&a6xx_state->objs);
+
+	/* Get the generic state from the adreno core */
+	adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+	a6xx_get_gmu_registers(gpu, a6xx_state);
+
+	/* If GX isn't on the rest of the data isn't going to be accessible */
+	if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+		return &a6xx_state->base;
+
+	/* Get the banks of indexed registers */
+	a6xx_get_indexed_registers(gpu, a6xx_state);
+
+	/* Try to initialize the crashdumper */
+	if (!a6xx_crashdumper_init(gpu, &dumper)) {
+		a6xx_get_registers(gpu, a6xx_state, &dumper);
+		a6xx_get_shaders(gpu, a6xx_state, &dumper);
+		a6xx_get_clusters(gpu, a6xx_state, &dumper);
+		a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+		msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+	}
+
+	a6xx_get_debugbus(gpu, a6xx_state);
+
+	return &a6xx_state->base;
+}
+
+void a6xx_gpu_state_destroy(struct kref *kref)
+{
+	struct a6xx_state_memobj *obj, *tmp;
+	struct msm_gpu_state *state = container_of(kref,
+			struct msm_gpu_state, ref);
+	struct a6xx_gpu_state *a6xx_state = container_of(state,
+			struct a6xx_gpu_state, base);
+
+	list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+		kfree(obj);
+
+	adreno_gpu_state_destroy(state);
+	kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+	if (IS_ERR_OR_NULL(state))
+		return 1;
+
+	return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+		struct drm_printer *p)
+{
+	int i, index = 0;
+
+	if (!data)
+		return;
+
+	for (i = 0; i < count; i += 2) {
+		u32 n = RANGE(registers, i);
+		u32 offset = registers[i];
+		int j;
+
+		for (j = 0; j < n; index++, offset++, j++) {
+			if (data[index] == 0xdeafbead)
+				continue;
+
+			drm_printf(p, "  - { offset: 0x%06x, value: 0x%08x }\n",
+				offset << 2, data[index]);
+		}
+	}
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+	char out[ASCII85_BUFSZ];
+	long i, l, datalen = 0;
+
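+	/* Find the last non-zero dword so trailing zeroes are not encoded */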
+	for (i = 0; i < len >> 2; i++) {
+		if (data[i])
+			datalen = (i + 1) << 2;
+	}
+
+	if (datalen == 0)
+		return;
+
+	drm_puts(p, "    data: !!ascii85 |\n");
+	drm_puts(p, "      ");
+
+	l = ascii85_encode_len(datalen);
+
+	for (i = 0; i < l; i++)
+		drm_puts(p, ascii85_encode(data[i], out));
+
+	drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+	drm_puts(p, fmt);
+	drm_puts(p, name);
+	drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+		struct drm_printer *p)
+{
+	const struct a6xx_shader_block *block = obj->handle;
+	int i;
+
+	if (!obj->handle)
+		return;
+
+	print_name(p, "  - type: ", block->name);
+
+	for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+		drm_printf(p, "    - bank: %d\n", i);
+		drm_printf(p, "      size: %d\n", block->size);
+
+		if (!obj->data)
+			continue;
+
+		print_ascii85(p, block->size << 2,
+			obj->data + (block->size * i));
+	}
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+		struct drm_printer *p)
+{
+	int ctx, index = 0;
+
+	for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+		int j;
+
+		drm_printf(p, "    - context: %d\n", ctx);
+
+		for (j = 0; j < size; j += 2) {
+			u32 count = RANGE(registers, j);
+			u32 offset = registers[j];
+			int k;
+
+			for (k = 0; k < count; index++, offset++, k++) {
+				if (data[index] == 0xdeafbead)
+					continue;
+
+				drm_printf(p, "      - { offset: 0x%06x, value: 0x%08x }\n",
+					offset << 2, data[index]);
+			}
+		}
+	}
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+		struct drm_printer *p)
+{
+	const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+	if (dbgahb) {
+		print_name(p, "  - cluster-name: ", dbgahb->name);
+		a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+			obj->data, p);
+	}
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+		struct drm_printer *p)
+{
+	const struct a6xx_cluster *cluster = obj->handle;
+
+	if (cluster) {
+		print_name(p, "  - cluster-name: ", cluster->name);
+		a6xx_show_cluster_data(cluster->registers, cluster->count,
+			obj->data, p);
+	}
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+		struct drm_printer *p)
+{
+	const struct a6xx_indexed_registers *indexed = obj->handle;
+
+	if (!indexed)
+		return;
+
+	print_name(p, "  - regs-name: ", indexed->name);
+	drm_printf(p, "    dwords: %d\n", indexed->count);
+
+	print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+		u32 *data, struct drm_printer *p)
+{
+	if (block) {
+		print_name(p, "  - debugbus-block: ", block->name);
+
+		/*
+		 * count for regular debugbus data is in quadwords,
+		 * but print the size in dwords for consistency
+		 */
+		drm_printf(p, "    count: %d\n", block->count << 1);
+
+		print_ascii85(p, block->count << 3, data);
+	}
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+		struct drm_printer *p)
+{
+	int i;
+
+	for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+		struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+		a6xx_show_debugbus_block(obj->handle, obj->data, p);
+	}
+
+	if (a6xx_state->vbif_debugbus) {
+		struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+		drm_puts(p, "  - debugbus-block: A6XX_DBGBUS_VBIF\n");
+		drm_printf(p, "    count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+		/* vbif debugbus data is in dwords.  Confusing, huh? */
+		print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+	}
+
+	for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+		struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+		a6xx_show_debugbus_block(obj->handle, obj->data, p);
+	}
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+		struct drm_printer *p)
+{
+	struct a6xx_gpu_state *a6xx_state = container_of(state,
+			struct a6xx_gpu_state, base);
+	int i;
+
+	if (IS_ERR_OR_NULL(state))
+		return;
+
+	adreno_show(gpu, state, p);
+
+	drm_puts(p, "registers:\n");
+	for (i = 0; i < a6xx_state->nr_registers; i++) {
+		struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+		const struct a6xx_registers *regs = obj->handle;
+
+		if (!obj->handle)
+			continue;
+
+		a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+	}
+
+	drm_puts(p, "registers-gmu:\n");
+	for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+		struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+		const struct a6xx_registers *regs = obj->handle;
+
+		if (!obj->handle)
+			continue;
+
+		a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+	}
+
+	drm_puts(p, "indexed-registers:\n");
+	for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+		a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+	drm_puts(p, "shader-blocks:\n");
+	for (i = 0; i < a6xx_state->nr_shaders; i++)
+		a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+	drm_puts(p, "clusters:\n");
+	for (i = 0; i < a6xx_state->nr_clusters; i++)
+		a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+	for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+		a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+	drm_puts(p, "debugbus:\n");
+	a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 0000000..68cccfa
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
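+/* All register tables below are inclusive { start, end } dword offset pairs */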
+static const u32 a6xx_gras_cluster[] = {
+	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+	0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+	0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+	0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+	0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+	0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+	0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+	0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+	0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+	0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE    0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS  3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS    5
+
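+/*
+ * sel_reg/sel_val, when non-zero, name a selector register that must be
+ * written before the cluster registers can be dumped.
+ */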
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+	{ .id = _id, .name = #_id,\
+		.registers = _reg, \
+		.count = ARRAY_SIZE(_reg), \
+		.sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+	u32 id;
+	const char *name;
+	const u32 *registers;
+	size_t count;
+	u32 sel_reg;
+	u32 sel_val;
+} a6xx_clusters[] = {
+	CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+	CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+	CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+	CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+	CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+	CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+	0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+	0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+	0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+	0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+	0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+	0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+	0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+	0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+	0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+	0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+	0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+	0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+	0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+	0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+	0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+	{ .name = #_id, .statetype = _type, .base = _base, \
+		.registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+	const char *name;
+	u32 statetype;
+	u32 base;
+	const u32 *registers;
+	size_t count;
+} a6xx_dbgahb_clusters[] = {
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+	CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+	0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+	0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+	0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+	0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+	0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+	const u32 *registers;
+	size_t count;
+	u32 val0;
+	u32 val1;
+};
+
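+/* For HLSQ lists, val0 holds the aperture base address and val1 the statetype */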
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+	{ .val0 = _base, .val1 = _type, .registers = _array, \
+		.count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+	HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+	HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+	HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+	{ .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+	const char *name;
+	u32 type;
+	u32 size;
+} a6xx_shader_blocks[] = {
+	SHADER(A6XX_TP0_TMO_DATA, 0x200),
+	SHADER(A6XX_TP0_SMO_DATA, 0x80),
+	SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+	SHADER(A6XX_TP1_TMO_DATA, 0x200),
+	SHADER(A6XX_TP1_SMO_DATA, 0x80),
+	SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+	SHADER(A6XX_SP_INST_DATA, 0x800),
+	SHADER(A6XX_SP_LB_0_DATA, 0x800),
+	SHADER(A6XX_SP_LB_1_DATA, 0x800),
+	SHADER(A6XX_SP_LB_2_DATA, 0x800),
+	SHADER(A6XX_SP_LB_3_DATA, 0x800),
+	SHADER(A6XX_SP_LB_4_DATA, 0x800),
+	SHADER(A6XX_SP_LB_5_DATA, 0x200),
+	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+	SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+	SHADER(A6XX_SP_UAV_DATA, 0x80),
+	SHADER(A6XX_SP_INST_TAG, 0x80),
+	SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+	SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+	SHADER(A6XX_SP_SMO_TAG, 0x80),
+	SHADER(A6XX_SP_STATE_DATA, 0x3f),
+	SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+	SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+	SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+	SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+	SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+	SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+	SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+	SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+	SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+	SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+	SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+	SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+	SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+	SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+	SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+	SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+	SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+	SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+	SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+	SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+	SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+	0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+	0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+	0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+	0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+	/* RBBM */
+	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+	0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+	0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+	0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+	0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+	0x0540, 0x0555,
+	/* CP */
+	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+	0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+	0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+	0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+	0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+	0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+	0x0a00, 0x0a03,
+	/* VSC */
+	0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+	/* UCHE */
+	0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+	0x0e38, 0x0e39,
+	/* GRAS */
+	0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+	0x8630, 0x8637,
+	/* VPC */
+	0x9600, 0x9604, 0x9624, 0x9637,
+	/* PC */
+	0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+	0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+	0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+	/* VFD */
+	0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+	0xa630, 0xa630,
+};
+
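+/* For crashdumper lists, a non-zero val0/val1 name a selector register/value */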
+#define REGS(_array, _sel_reg, _sel_val) \
+	{ .registers = _array, .count = ARRAY_SIZE(_array), \
+		.val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+	REGS(a6xx_registers, 0, 0),
+	REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+	REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+	/* RBBM_STATUS - RBBM_STATUS3 */
+	0x210, 0x213,
+	/* CP_STATUS_1 */
+	0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+	0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+	0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+	0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+	0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+	0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+	0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+	0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+	0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+	0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+	REGS(a6xx_ahb_registers, 0, 0),
+	REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+	/* GMU GX */
+	0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+	0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+	0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+	0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+	0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+	/* GMU CX */
+	0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+	0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+	0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+	0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+	0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+	0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+	0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+	0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+	0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+	/* GPU RSCC */
+	0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+	0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+	0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+	/* GMU AO */
+	0x9300, 0x9316, 0x9400, 0x9400,
+	/* GPU CC */
+	0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+	0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+	0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+	0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+	0xb800, 0xb802,
+	/* GPU CC ACD */
+	0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+	REGS(a6xx_gmu_cx_registers, 0, 0),
+	REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+	const char *name;
+	u32 addr;
+	u32 data;
+	u32 count;
+} a6xx_indexed_reglist[] = {
+	{ "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+		REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+	{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+		REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+	{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+		REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+	{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+		REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+	"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+		REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
+
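+/* Debug bus block counts are in 64-bit samples (quadwords) */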
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+	const char *name;
+	u32 id;
+	u32 count;
+} a6xx_debugbus_blocks[] = {
+	DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+	DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+	DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 6ff9bae..eda11abc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
 
 	if (ret) {
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"Message %s id %d timed out waiting for response\n",
 			a6xx_hfi_msg_id[id], seqnum);
 		return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 
 		/* If the queue is empty our response never made it */
 		if (!ret) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"The HFI response queue is unexpectedly empty\n");
 
 			return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 			struct a6xx_hfi_msg_error *error =
 				(struct a6xx_hfi_msg_error *) &resp;
 
-			dev_err(gmu->dev, "GMU firmware error %d\n",
+			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
 				error->code);
 			continue;
 		}
 
 		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"Unexpected message id %d on the response queue\n",
 				HFI_HEADER_SEQNUM(resp.ret_header));
 			continue;
 		}
 
 		if (resp.error) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"Message %s id %d returned error %d\n",
 				a6xx_hfi_msg_id[id], seqnum, resp.error);
 			return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
 
 	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to send message %s id %d\n",
+		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
 			a6xx_hfi_msg_id[id], seqnum);
 		return ret;
 	}
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
 			continue;
 
 		if (queue->header->read_index != queue->header->write_index)
-			dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
 
 		queue->header->read_index = 0;
 		queue->header->write_index = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 1318959..641d3ba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
 #define REG_AXXX_CP_STATE_DEBUG_DATA				0x000001ed
 
 #define REG_AXXX_CP_INT_CNTL					0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK				0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK			0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK			0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK		0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK		0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK				0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK				0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK				0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK				0x80000000
 
 #define REG_AXXX_CP_INT_STATUS					0x000001f3
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 86abdb2..714ed65 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
 
 static const struct adreno_info gpulist[] = {
 	{
+		.rev   = ADRENO_REV(2, 0, 0, 0),
+		.revn  = 200,
+		.name  = "A200",
+		.fw = {
+			[ADRENO_FW_PM4] = "yamato_pm4.fw",
+			[ADRENO_FW_PFP] = "yamato_pfp.fw",
+		},
+		.gmem  = SZ_256K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init  = a2xx_gpu_init,
+	}, { /* a200 on i.mx51 has only 128KiB gmem */
+		.rev   = ADRENO_REV(2, 0, 0, 1),
+		.revn  = 201,
+		.name  = "A200",
+		.fw = {
+			[ADRENO_FW_PM4] = "yamato_pm4.fw",
+			[ADRENO_FW_PFP] = "yamato_pfp.fw",
+		},
+		.gmem  = SZ_128K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init  = a2xx_gpu_init,
+	}, {
+		.rev   = ADRENO_REV(2, 2, 0, ANY_ID),
+		.revn  = 220,
+		.name  = "A220",
+		.fw = {
+			[ADRENO_FW_PM4] = "leia_pm4_470.fw",
+			[ADRENO_FW_PFP] = "leia_pfp_470.fw",
+		},
+		.gmem  = SZ_512K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init  = a2xx_gpu_init,
+	}, {
 		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
 		.revn  = 305,
 		.name  = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
-		dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
 		return NULL;
 	}
 
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 	pm_runtime_put_autosuspend(&pdev->dev);
 	if (ret) {
-		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 		return NULL;
 	}
 
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
 	if (ret == 0) {
 		unsigned int r, patch;
 
-		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
 			rev->core = r / 100;
 			r %= 100;
 			rev->major = r / 10;
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
 	/* and if that fails, fall back to legacy "qcom,chipid" property: */
 	ret = of_property_read_u32(node, "qcom,chipid", &chipid);
 	if (ret) {
-		dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
 		return ret;
 	}
 
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	static struct adreno_platform_config config = {};
 	const struct adreno_info *info;
 	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
 	struct msm_gpu *gpu;
 	int ret;
 
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
 		config.rev.minor, config.rev.patchid);
 
+	priv->is_a2xx = config.rev.core == 2;
+
 	gpu = info->init(drm);
 	if (IS_ERR(gpu)) {
 		dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
 		.unbind = adreno_unbind,
 };
 
+static void adreno_device_register_headless(void)
+{
+	/*
+	 * On imx5 there is no top-level mdp/dpu node, so create a dummy
+	 * "msm" platform device for the driver in that case.
+	 */
+	struct platform_device_info dummy_info = {
+		.parent = NULL,
+		.name = "msm",
+		.id = -1,
+		.res = NULL,
+		.num_res = 0,
+		.data = NULL,
+		.size_data = 0,
+		.dma_mask = ~0,
+	};
+	platform_device_register_full(&dummy_info);
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
-	return component_add(&pdev->dev, &a3xx_ops);
+	int ret;
+
+	ret = component_add(&pdev->dev, &a3xx_ops);
+	if (ret)
+		return ret;
+
+	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+		adreno_device_register_headless();
+
+	return 0;
 }
 
 static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
+	/* for compatibility with imx5 gpu: */
+	{ .compatible = "amd,imageon" },
 	/* for backwards compat w/ downstream kgsl DT files: */
 	{ .compatible = "qcom,kgsl-3d0" },
 	{}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4..2e4372ef 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
 		ret = request_firmware_direct(&fw, newname, drm->dev);
 		if (!ret) {
-			dev_info(drm->dev, "loaded %s from new location\n",
+			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
 				newname);
 			adreno_gpu->fwloc = FW_LOCATION_NEW;
 			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-			dev_err(drm->dev, "failed to load %s: %d\n",
+			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 				newname, ret);
 			fw = ERR_PTR(ret);
 			goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
 		ret = request_firmware_direct(&fw, fwname, drm->dev);
 		if (!ret) {
-			dev_info(drm->dev, "loaded %s from legacy location\n",
+			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
 				newname);
 			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
 			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-			dev_err(drm->dev, "failed to load %s: %d\n",
+			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 				fwname, ret);
 			fw = ERR_PTR(ret);
 			goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
 		ret = request_firmware(&fw, newname, drm->dev);
 		if (!ret) {
-			dev_info(drm->dev, "loaded %s with helper\n",
+			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
 				newname);
 			adreno_gpu->fwloc = FW_LOCATION_HELPER;
 			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-			dev_err(drm->dev, "failed to load %s: %d\n",
+			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 				newname, ret);
 			fw = ERR_PTR(ret);
 			goto out;
 		}
 	}
 
-	dev_err(drm->dev, "failed to load %s\n", fwname);
+	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
 	fw = ERR_PTR(-ENOENT);
 out:
 	kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		if (!ring)
 			continue;
 
-		ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
-		if (ret) {
-			ring->iova = 0;
-			dev_err(gpu->dev->dev,
-				"could not map ringbuffer %d: %d\n", i, ret);
-			return ret;
-		}
-
 		ring->cur = ring->start;
 		ring->next = ring->start;
 
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
 
 	ret = msm_gpu_hw_init(gpu);
 	if (ret) {
-		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 		/* hmm, oh well? */
 	}
 }
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		 */
 		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
 		OUT_RING(ring, HLSQ_FLUSH);
-
-		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
-		OUT_RING(ring, 0x00000000);
 	}
 
-	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
-	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
-	OUT_RING(ring, rbmemptr(ring, fence));
-	OUT_RING(ring, submit->seqno);
+	/* wait for idle before cache flush/interrupt */
+	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+	OUT_RING(ring, 0x00000000);
+
+	if (!adreno_is_a2xx(adreno_gpu)) {
+		/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+		OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+		OUT_RING(ring, rbmemptr(ring, fence));
+		OUT_RING(ring, submit->seqno);
+	} else {
+		/* BIT(31) means something else on a2xx */
+		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+		OUT_RING(ring, CACHE_FLUSH_TS);
+		OUT_RING(ring, rbmemptr(ring, fence));
+		OUT_RING(ring, submit->seqno);
+		OUT_PKT3(ring, CP_INTERRUPT, 1);
+		OUT_RING(ring, 0x80000000);
+	}
 
 #if 0
 	if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 				size = j + 1;
 
 		if (size) {
-			state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
 			if (state->ring[i].data) {
 				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
 				state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 		}
 	}
 
+	/* Some targets prefer to collect their own registers */
+	if (!adreno_gpu->registers)
+		return 0;
+
 	/* Count the number of registers */
 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
 		count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
-		kfree(state->ring[i].data);
+		kvfree(state->ring[i].data);
 
 	for (i = 0; state->bos && i < state->nr_bos; i++)
 		kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
 
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
 {
+	void *buf;
+	size_t buf_itr = 0, buffer_size;
 	char out[ASCII85_BUFSZ];
-	long l, datalen, i;
+	long l;
+	int i;
 
-	if (!ptr || !len)
-		return;
+	if (!src || !len)
+		return NULL;
+
+	l = ascii85_encode_len(len);
 
 	/*
-	 * Only dump the non-zero part of the buffer - rarely will any data
-	 * completely fill the entire allocated size of the buffer
+	 * ascii85 encodes each dword as either 5 bytes or a single byte, so
+	 * account for the worst case of 5 bytes per dword plus 1 for the '\0'
 	 */
-	for (datalen = 0, i = 0; i < len >> 2; i++) {
-		if (ptr[i])
-			datalen = (i << 2) + 1;
-	}
+	buffer_size = (l * 5) + 1;
 
-	/* Skip printing the object if it is empty */
-	if (datalen == 0)
+	buf = kvmalloc(buffer_size, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	for (i = 0; i < l; i++)
+		buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+				ascii85_encode(src[i], out));
+
+	return buf;
+}
+
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+		bool *encoded)
+{
+	if (!*ptr || !len)
 		return;
 
-	l = ascii85_encode_len(datalen);
+	if (!*encoded) {
+		long datalen, i;
+		u32 *buf = *ptr;
+
+		/*
+		 * Only dump the non-zero part of the buffer - rarely will
+		 * any data completely fill the entire allocated size of
+		 * the buffer.
+		 */
+		for (datalen = 0, i = 0; i < len >> 2; i++)
+			if (buf[i])
+				datalen = ((i + 1) << 2);
+
+		/*
+		 * If we reach here, then the originally captured binary buffer
+		 * will be replaced with the ascii85 encoded string
+		 */
+		*ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+		kvfree(buf);
+
+		*encoded = true;
+	}
+
+	if (!*ptr)
+		return;
 
 	drm_puts(p, "    data: !!ascii85 |\n");
 	drm_puts(p, "     ");
 
-	for (i = 0; i < l; i++)
-		drm_puts(p, ascii85_encode(ptr[i], out));
+	drm_puts(p, *ptr);
 
 	drm_puts(p, "\n");
 }
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
 		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);
 
-		adreno_show_object(p, state->ring[i].data,
-			state->ring[i].data_size);
+		adreno_show_object(p, &state->ring[i].data,
+			state->ring[i].data_size, &state->ring[i].encoded);
 	}
 
 	if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 				state->bos[i].iova);
 			drm_printf(p, "    size: %zd\n", state->bos[i].size);
 
-			adreno_show_object(p, state->bos[i].data,
-				state->bos[i].size);
+			adreno_show_object(p, &state->bos[i].data,
+				state->bos[i].size, &state->bos[i].encoded);
 		}
 	}
 
-	drm_puts(p, "registers:\n");
+	if (state->nr_registers) {
+		drm_puts(p, "registers:\n");
 
-	for (i = 0; i < state->nr_registers; i++) {
-		drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
-			state->registers[i * 2] << 2,
-			state->registers[(i * 2) + 1]);
+		for (i = 0; i < state->nr_registers; i++) {
+			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
+				state->registers[i * 2] << 2,
+				state->registers[(i * 2) + 1]);
+		}
 	}
 }
 #endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	int i;
 
+	if (!adreno_gpu->registers)
+		return;
+
 	/* dump these out in a form that can be parsed by demsm: */
 	printk("IO:region %s 00000000 00020000\n", gpu->name);
 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
 
 	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
 	if (!node) {
-		dev_err(dev, "Could not find the GPU powerlevels\n");
+		DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
 		return -ENXIO;
 	}
 
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
 	else {
 		ret = dev_pm_opp_of_add_table(dev);
 		if (ret)
-			dev_err(dev, "Unable to set the OPP table\n");
+			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
 	}
 
 	if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	adreno_gpu_config.va_start = SZ_16M;
 	adreno_gpu_config.va_end = 0xffffffff;
+	/* maximum range of a2xx mmu */
+	if (adreno_is_a2xx(adreno_gpu))
+		adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
 
 	adreno_gpu_config.nr_rings = nr_rings;
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index de6e6ee..5db459b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -21,6 +21,7 @@
 #define __ADRENO_GPU_H__
 
 #include <linux/firmware.h>
+#include <linux/iopoll.h>
 
 #include "msm_gpu.h"
 
@@ -154,6 +155,20 @@ struct adreno_platform_config {
 	__ret;                                             \
 })
 
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+	return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+	return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 225;
+}
 
 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
 {
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
 		gpu_write(&gpu->base, reg - 1, data);
 }
 
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
 	((1 << 29) | \
 	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
 
+
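+/* Poll a GPU register (dword offset) until cond is true; times are in usecs */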
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+		interval, timeout)
+
 #endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 15eb03b..79b907a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
 	DI_SRC_SEL_RESERVED = 3,
 };
 
+enum pc_di_face_cull_sel {
+	DI_FACE_CULL_NONE = 0,
+	DI_FACE_CULL_FETCH = 1,
+	DI_FACE_BACKFACE_CULL = 2,
+	DI_FACE_FRONTFACE_CULL = 3,
+};
+
 enum pc_di_index_size {
 	INDEX_SIZE_IGN = 0,
 	INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
 	RM6_GMEM = 4,
 	RM6_BLIT2D = 5,
 	RM6_RESOLVE = 6,
+	RM6_BLIT2DSCALE = 12,
 };
 
 enum pseudo_reg {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 879c13f..e45c690 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 	unsigned long irq_flags;
 	int i, irq_count, enable_count, cb_count;
 
-	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
-		DPU_ERROR("invalid parameters\n");
+	if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
 		return 0;
-	}
 
 	for (i = 0; i < irq_obj->total_irqs; i++) {
 		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 
 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
 
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
 		struct dentry *parent)
 {
-	dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
-			parent, &dpu_kms->irq_obj,
-			&dpu_debugfs_core_irq_fops);
-
-	return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
-	debugfs_remove(dpu_kms->irq_obj.debugfs_file);
-	dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
-		struct dentry *parent)
-{
-	return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+	debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+		&dpu_debugfs_core_irq_fops);
 }
 #endif
 
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 	struct msm_drm_private *priv;
 	int i;
 
-	if (!dpu_kms) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return;
-	} else if (!dpu_kms->dev) {
+	if (!dpu_kms->dev) {
 		DPU_ERROR("invalid drm device\n");
 		return;
 	} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 	}
 }
 
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
-	return 0;
-}
-
 void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
 {
 	struct msm_drm_private *priv;
 	int i;
 
-	if (!dpu_kms) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return;
-	} else if (!dpu_kms->dev) {
+	if (!dpu_kms->dev) {
 		DPU_ERROR("invalid drm device\n");
 		return;
 	} else if (!dpu_kms->dev->dev_private) {
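
[Editor's note] Worth flagging for reviewers: dpu_debugfs_core_irq_init() now returns void, ignores the dentry from debugfs_create_file(), and its destroy counterpart is gone entirely. That follows the current debugfs convention, where creation errors are non-fatal and cleanup falls out of removing the parent directory. A minimal sketch of the pairing, with hypothetical helper names:

	static void example_debugfs_init(void *priv, struct dentry *parent,
					 const struct file_operations *fops)
	{
		/* return value intentionally ignored; debugfs errors are not fatal */
		debugfs_create_file("core_irq", 0600, parent, priv, fops);
	}

	static void example_debugfs_fini(struct dentry *parent)
	{
		/* removing the parent recursively removes "core_irq" as well */
		debugfs_remove_recursive(parent);
	}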
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 5e98bba..e9015a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -24,13 +24,6 @@
 void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
 
 /**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms:		DPU handle
- * @return:		0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
-/**
  * dpu_core_irq_uninstall - uninstall core IRQ handler
  * @dpu_kms:		DPU handle
  * @return:		none
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
  * dpu_debugfs_core_irq_init - register core irq debugfs
  * @dpu_kms: pointer to kms
  * @parent: debugfs directory root
- * @Return: 0 on success
  */
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
 		struct dentry *parent);
 
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
 #endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 41c5191..9f20f39 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -24,8 +24,6 @@
 #include "dpu_crtc.h"
 #include "dpu_core_perf.h"
 
-#define DPU_PERF_MODE_STRING_SIZE	128
-
 /**
  * enum dpu_perf_mode - performance tuning mode
  * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 	return to_dpu_kms(priv->kms);
 }
 
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
-	return dpu_crtc_is_enabled(crtc);
-}
-
 static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
 {
 	struct drm_crtc *tmp_crtc;
-	bool intf_connected = false;
-
-	if (!crtc)
-		goto end;
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
 		if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
-				_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+				tmp_crtc->enabled) {
 			DPU_DEBUG("video interface connected crtc:%d\n",
 				tmp_crtc->base.id);
-			intf_connected = true;
-			goto end;
+			return true;
 		}
 	}
 
-end:
-	return intf_connected;
+	return false;
 }
 
 static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
 	memset(perf, 0, sizeof(struct dpu_core_perf_params));
 
 	if (!dpu_cstate->bw_control) {
-		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
 					1000ULL;
 			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
 		}
 		perf->core_clk_rate = kms->perf.max_core_clk_rate;
 	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
-		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 			perf->bw_ctl[i] = 0;
 			perf->max_per_pipe_ib[i] = 0;
 		}
 		perf->core_clk_rate = 0;
 	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
-		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
 			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
 		}
@@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
 	DPU_DEBUG(
 		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
 			crtc->base.id, perf->core_clk_rate,
-			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
-			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+			perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+			perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+			perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
+			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
 }
 
 int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 	/* obtain new values */
 	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
 
-	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
-			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 		bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
 		curr_client_type = dpu_crtc_get_client_type(crtc);
 
 		drm_for_each_crtc(tmp_crtc, crtc->dev) {
-			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			if (tmp_crtc->enabled &&
 			    (dpu_crtc_get_client_type(tmp_crtc) ==
 					    curr_client_type) &&
 			    (tmp_crtc != crtc)) {
@@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
 	int ret = 0;
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+		if (tmp_crtc->enabled &&
 			curr_client_type ==
 				dpu_crtc_get_client_type(tmp_crtc)) {
 			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	 */
 	if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
 		drm_for_each_crtc(tmp_crtc, crtc->dev) {
-			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			if (tmp_crtc->enabled &&
 				dpu_crtc_get_intf_mode(tmp_crtc) ==
 						INTF_MODE_VIDEO)
 				return;
@@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	if (kms->perf.enable_bw_release) {
 		trace_dpu_cmd_release_bw(crtc->base.id);
 		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
-		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 			dpu_crtc->cur_perf.bw_ctl[i] = 0;
 			_dpu_core_perf_crtc_update_bus(kms, crtc, i);
 		}
@@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
 	struct dpu_crtc_state *dpu_cstate;
 
 	drm_for_each_crtc(crtc, kms->dev) {
-		if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+		if (crtc->enabled) {
 			dpu_cstate = to_dpu_crtc_state(crtc->state);
 			clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
 							clk_rate);
@@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 	old = &dpu_crtc->cur_perf;
 	new = &dpu_cstate->new_perf;
 
-	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+	if (crtc->enabled && !stop_req) {
+		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 			/*
 			 * cases for bus bandwidth update.
 			 * 1. new bandwidth vote - "ab or ib vote" is higher
@@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 		update_clk = 1;
 	}
 	trace_dpu_perf_crtc_update(crtc->base.id,
-				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+				new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+				new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+				new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
 				new->core_clk_rate, stop_req,
 				update_bus, update_clk);
 
-	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+	for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
 		if (update_bus & BIT(i)) {
 			ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
 			if (ret) {
@@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
 	struct dpu_core_perf *perf = file->private_data;
 	struct dpu_perf_cfg *cfg = &perf->catalog->perf;
 	u32 perf_mode = 0;
-	char buf[10];
+	int ret;
 
-	if (!perf)
-		return -ENODEV;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (kstrtouint(buf, 0, &perf_mode))
-		return -EFAULT;
+	ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+	if (ret)
+		return ret;
 
 	if (perf_mode >= DPU_PERF_MODE_MAX)
-		return -EFAULT;
+		return -EINVAL;
 
 	if (perf_mode == DPU_PERF_MODE_FIXED) {
 		DRM_INFO("fix performance mode\n");
@@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {
 	struct dpu_core_perf *perf = file->private_data;
-	int len = 0;
-	char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+	int len;
+	char buf[128];
 
-	if (!perf)
-		return -ENODEV;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	len = snprintf(buf, sizeof(buf),
+	len = scnprintf(buf, sizeof(buf),
 			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
 			perf->perf_tune.mode,
 			perf->perf_tune.min_core_clk,
 			perf->perf_tune.min_bus_vote);
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
 
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;   /* increase offset */
-
-	return len;
+	return simple_read_from_buffer(buff, count, ppos, buf, len);
 }
 
 static const struct file_operations dpu_core_perf_mode_fops = {
@@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
 	.write = _dpu_core_perf_mode_write,
 };
 
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
 {
-	debugfs_remove_recursive(perf->debugfs_root);
-	perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-		struct dentry *parent)
-{
+	struct dpu_core_perf *perf = &dpu_kms->perf;
 	struct dpu_mdss_cfg *catalog = perf->catalog;
-	struct msm_drm_private *priv;
-	struct dpu_kms *dpu_kms;
+	struct dentry *entry;
 
-	priv = perf->dev->dev_private;
-	if (!priv || !priv->kms) {
-		DPU_ERROR("invalid KMS reference\n");
+	entry = debugfs_create_dir("core_perf", parent);
+	if (IS_ERR_OR_NULL(entry))
 		return -EINVAL;
-	}
 
-	dpu_kms = to_dpu_kms(priv->kms);
-
-	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
-	if (!perf->debugfs_root) {
-		DPU_ERROR("failed to create core perf debugfs\n");
-		return -EINVAL;
-	}
-
-	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+	debugfs_create_u64("max_core_clk_rate", 0600, entry,
 			&perf->max_core_clk_rate);
-	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+	debugfs_create_u64("core_clk_rate", 0600, entry,
 			&perf->core_clk_rate);
-	debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+	debugfs_create_u32("enable_bw_release", 0600, entry,
 			(u32 *)&perf->enable_bw_release);
-	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+	debugfs_create_u32("threshold_low", 0600, entry,
 			(u32 *)&catalog->perf.max_bw_low);
-	debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+	debugfs_create_u32("threshold_high", 0600, entry,
 			(u32 *)&catalog->perf.max_bw_high);
-	debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+	debugfs_create_u32("min_core_ib", 0600, entry,
 			(u32 *)&catalog->perf.min_core_ib);
-	debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+	debugfs_create_u32("min_llcc_ib", 0600, entry,
 			(u32 *)&catalog->perf.min_llcc_ib);
-	debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+	debugfs_create_u32("min_dram_ib", 0600, entry,
 			(u32 *)&catalog->perf.min_dram_ib);
-	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+	debugfs_create_file("perf_mode", 0600, entry,
 			(u32 *)perf, &dpu_core_perf_mode_fops);
-	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+	debugfs_create_u64("fix_core_clk_rate", 0600, entry,
 			&perf->fix_core_clk_rate);
-	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+	debugfs_create_u64("fix_core_ib_vote", 0600, entry,
 			&perf->fix_core_ib_vote);
-	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+	debugfs_create_u64("fix_core_ab_vote", 0600, entry,
 			&perf->fix_core_ab_vote);
 
 	return 0;
 }
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-		struct dentry *parent)
-{
-	return 0;
-}
 #endif
 
 void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
 		return;
 	}
 
-	dpu_core_perf_debugfs_destroy(perf);
 	perf->max_core_clk_rate = 0;
 	perf->core_clk = NULL;
-	perf->phandle = NULL;
 	perf->catalog = NULL;
 	perf->dev = NULL;
 }
@@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
 int dpu_core_perf_init(struct dpu_core_perf *perf,
 		struct drm_device *dev,
 		struct dpu_mdss_cfg *catalog,
-		struct dpu_power_handle *phandle,
 		struct dss_clk *core_clk)
 {
 	perf->dev = dev;
 	perf->catalog = catalog;
-	perf->phandle = phandle;
 	perf->core_clk = core_clk;
 
 	perf->max_core_clk_rate = core_clk->max_rate;
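
[Editor's note] The _dpu_core_perf_mode_write/read rewrite above trades roughly thirty lines of open-coded copy_from_user/copy_to_user bookkeeping for two helpers: kstrtouint_from_user() parses directly from the user buffer, and simple_read_from_buffer() handles *ppos and short reads that the removed version handled by hand. A minimal sketch of the same idiom; "example_val" and the format string are hypothetical:

	static unsigned int example_val;	/* hypothetical tunable */

	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *ppos)
	{
		int ret = kstrtouint_from_user(ubuf, count, 0, &example_val);

		/* parse failure propagates; otherwise the whole write is consumed */
		return ret ? ret : count;
	}

	static ssize_t example_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
	{
		char buf[32];
		int len = scnprintf(buf, sizeof(buf), "%u\n", example_val);

		/* handles *ppos and short reads, so repeated reads hit EOF cleanly */
		return simple_read_from_buffer(ubuf, count, ppos, buf, len);
	}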
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index fbcbe0c..37f5188 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,19 +19,31 @@
 #include <drm/drm_crtc.h>
 
 #include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
 
 #define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
 
 /**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+	DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+	DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+	DPU_CORE_PERF_DATA_BUS_ID_EBI,
+	DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
+/**
  * struct dpu_core_perf_params - definition of performance parameters
  * @max_per_pipe_ib: maximum instantaneous bandwidth request
  * @bw_ctl: arbitrated bandwidth request
  * @core_clk_rate: core clock rate request
  */
 struct dpu_core_perf_params {
-	u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
-	u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
+	u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
 	u64 core_clk_rate;
 };
 
@@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
  * @dev: Pointer to drm device
  * @debugfs_root: top level debug folder
  * @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
  * @core_clk: Pointer to core clock structure
  * @core_clk_rate: current core clock rate
  * @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +77,6 @@ struct dpu_core_perf {
 	struct drm_device *dev;
 	struct dentry *debugfs_root;
 	struct dpu_mdss_cfg *catalog;
-	struct dpu_power_handle *phandle;
 	struct dss_clk *core_clk;
 	u64 core_clk_rate;
 	u64 max_core_clk_rate;
@@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
  * @perf: Pointer to core performance context
  * @dev: Pointer to drm device
  * @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
  * @core_clk: pointer to core clock
  */
 int dpu_core_perf_init(struct dpu_core_perf *perf,
 		struct drm_device *dev,
 		struct dpu_mdss_cfg *catalog,
-		struct dpu_power_handle *phandle,
 		struct dss_clk *core_clk);
 
+struct dpu_kms;
+
 /**
  * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
  * @debugfs_parent: Pointer to parent debugfs
  */
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-		struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
 
 #endif /* _DPU_CORE_PERF_H_ */
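
[Editor's note] The renamed DPU_CORE_PERF_DATA_BUS_ID_* values double as indices into the two per-bus arrays in dpu_core_perf_params, which is why call sites throughout this series loop from 0 to _MAX. An illustrative helper (not from the series) showing the indexing:

	static u64 example_total_bw(const struct dpu_core_perf_params *perf)
	{
		u64 total = 0;
		int i;

		/* the enum doubles as an index into the per-bus vote arrays */
		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++)
			total += perf->bw_ctl[i];

		return total;
	}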
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index ca169f0..9be7c35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -33,7 +33,6 @@
 #include "dpu_plane.h"
 #include "dpu_encoder.h"
 #include "dpu_vbif.h"
-#include "dpu_power_handle.h"
 #include "dpu_core_perf.h"
 #include "dpu_trace.h"
 
@@ -47,13 +46,7 @@
 #define LEFT_MIXER 0
 #define RIGHT_MIXER 1
 
-static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
-					    struct drm_display_mode *mode)
-{
-	return mode->hdisplay / cstate->num_mixers;
-}
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
 
@@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
 	if (!crtc)
 		return;
 
-	dpu_crtc->phandle = NULL;
-
 	drm_crtc_cleanup(crtc);
-	mutex_destroy(&dpu_crtc->crtc_lock);
 	kfree(dpu_crtc);
 }
 
@@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
 		return INTF_MODE_NONE;
 	}
 
-	drm_for_each_encoder(encoder, crtc->dev)
-		if (encoder->crtc == crtc)
-			return dpu_encoder_get_intf_mode(encoder);
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		return dpu_encoder_get_intf_mode(encoder);
 
 	return INTF_MODE_NONE;
 }
 
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc = (struct drm_crtc *)data;
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
 	/* keep statistics on vblank callback - with auto reset via debugfs */
@@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
 	trace_dpu_crtc_vblank_cb(DRMID(crtc));
 }
 
+static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
+{
+	int ret = 0;
+	struct drm_modeset_acquire_ctx ctx;
+
+	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+	dpu_core_perf_crtc_release_bw(crtc);
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
+	if (ret)
+		DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
+			  ret);
+}
+
 static void dpu_crtc_frame_event_work(struct kthread_work *work)
 {
 	struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
 			/* release bandwidth and other resources */
 			trace_dpu_crtc_frame_event_done(DRMID(crtc),
 							fevent->event);
-			dpu_core_perf_crtc_release_bw(crtc);
+			dpu_crtc_release_bw_unlocked(crtc);
 		} else {
 			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
 								fevent->event);
@@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
 
 static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
 {
-	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 	struct drm_encoder *enc;
 
-	mutex_lock(&dpu_crtc->crtc_lock);
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
 	/* Check for mixers on all encoders attached to this crtc */
-	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
-		if (enc->crtc != crtc)
-			continue;
-
+	drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
 		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-	}
-
-	mutex_unlock(&dpu_crtc->crtc_lock);
 }
 
 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
-	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
-	u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
+	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
 	int i;
 
 	for (i = 0; i < cstate->num_mixers; i++) {
@@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		r->x1 = crtc_split_width * i;
 		r->y1 = 0;
 		r->x2 = r->x1 + crtc_split_width;
-		r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+		r->y2 = adj_mode->vdisplay;
 
 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
 	}
@@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		/* encoder will trigger pending mask now */
+	/* encoder will trigger pending mask now */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
 		dpu_encoder_trigger_kickoff_pending(encoder);
-	}
 
 	/*
 	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
@@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
 	return rc;
 }
 
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
 {
 	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
 
 	DPU_ATRACE_BEGIN("crtc_commit");
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	/*
+	 * Encoder will flush/start now, unless it has a tx pending. If so, it
+	 * may delay and flush at an irq event (e.g. ppdone)
+	 */
+	drm_for_each_encoder_mask(encoder, crtc->dev,
+				  crtc->state->encoder_mask) {
 		struct dpu_encoder_kickoff_params params = { 0 };
-
-		if (encoder->crtc != crtc)
-			continue;
-
-		/*
-		 * Encoder will flush/start now, unless it has a tx pending.
-		 * If so, it may delay and flush at an irq event (e.g. ppdone)
-		 */
-		dpu_encoder_prepare_for_kickoff(encoder, &params);
+		dpu_encoder_prepare_for_kickoff(encoder, &params, async);
 	}
 
-	/* wait for frame_event_done completion */
-	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
-	ret = _dpu_crtc_wait_for_frame_done(crtc);
-	DPU_ATRACE_END("wait_for_frame_done_event");
-	if (ret) {
-		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-				crtc->base.id,
-				atomic_read(&dpu_crtc->frame_pending));
-		goto end;
+
+	if (!async) {
+		/* wait for frame_event_done completion */
+		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+		ret = _dpu_crtc_wait_for_frame_done(crtc);
+		DPU_ATRACE_END("wait_for_frame_done_event");
+		if (ret) {
+			DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+					crtc->base.id,
+					atomic_read(&dpu_crtc->frame_pending));
+			goto end;
+		}
+
+		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+			/* acquire bandwidth and other resources */
+			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+		} else
+			DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+
+		dpu_crtc->play_count++;
 	}
 
-	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
-		/* acquire bandwidth and other resources */
-		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
-	} else
-		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
-
-	dpu_crtc->play_count++;
-
 	dpu_vbif_clear_errors(dpu_kms);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		dpu_encoder_kickoff(encoder);
-	}
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		dpu_encoder_kickoff(encoder, async);
 
 end:
-	reinit_completion(&dpu_crtc->frame_done_comp);
+	if (!async)
+		reinit_completion(&dpu_crtc->frame_done_comp);
 	DPU_ATRACE_END("crtc_commit");
 }
 
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- */
-static void _dpu_crtc_vblank_enable_no_lock(
-		struct dpu_crtc *dpu_crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc = &dpu_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_encoder *enc;
+	struct dpu_crtc_state *cstate;
 
-	if (enable) {
-		/* drop lock since power crtc cb may try to re-acquire lock */
-		mutex_unlock(&dpu_crtc->crtc_lock);
-		pm_runtime_get_sync(dev->dev);
-		mutex_lock(&dpu_crtc->crtc_lock);
+	if (crtc->state)
+		dpu_crtc_destroy_state(crtc, crtc->state);
 
-		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			if (enc->crtc != crtc)
-				continue;
-
-			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
-						     DRMID(enc), enable,
-						     dpu_crtc);
-
-			dpu_encoder_register_vblank_callback(enc,
-					dpu_crtc_vblank_cb, (void *)crtc);
-		}
-	} else {
-		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			if (enc->crtc != crtc)
-				continue;
-
-			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
-						     DRMID(enc), enable,
-						     dpu_crtc);
-
-			dpu_encoder_register_vblank_callback(enc, NULL, NULL);
-		}
-
-		/* drop lock since power crtc cb may try to re-acquire lock */
-		mutex_unlock(&dpu_crtc->crtc_lock);
-		pm_runtime_put_sync(dev->dev);
-		mutex_lock(&dpu_crtc->crtc_lock);
-	}
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
-{
-	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
-	DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
-	mutex_lock(&dpu_crtc->crtc_lock);
-
-	/*
-	 * If the vblank is enabled, release a power reference on suspend
-	 * and take it back during resume (if it is still enabled).
-	 */
-	trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
-	if (dpu_crtc->suspend == enable)
-		DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
-				crtc->base.id, enable);
-	else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
-		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
-	}
-
-	dpu_crtc->suspend = enable;
-	mutex_unlock(&dpu_crtc->crtc_lock);
+	crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
+	if (crtc->state)
+		crtc->state->crtc = crtc;
 }
 
 /**
@@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 	return &cstate->base;
 }
 
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
-	struct dpu_crtc *dpu_crtc;
-	struct dpu_crtc_state *cstate;
-
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return;
-	}
-
-	/* revert suspend actions, if necessary */
-	if (dpu_kms_is_suspend_state(crtc->dev))
-		_dpu_crtc_set_suspend(crtc, false);
-
-	/* remove previous state, if present */
-	if (crtc->state) {
-		dpu_crtc_destroy_state(crtc, crtc->state);
-		crtc->state = 0;
-	}
-
-	dpu_crtc = to_dpu_crtc(crtc);
-	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
-	if (!cstate) {
-		DPU_ERROR("failed to allocate state\n");
-		return;
-	}
-
-	cstate->base.crtc = crtc;
-	crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
-	struct drm_crtc *crtc = arg;
-	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-	struct drm_encoder *encoder;
-
-	mutex_lock(&dpu_crtc->crtc_lock);
-
-	trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
-	/* restore encoder; crtc will be programmed during commit */
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		dpu_encoder_virt_restore(encoder);
-	}
-
-	mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state)
 {
 	struct dpu_crtc *dpu_crtc;
 	struct dpu_crtc_state *cstate;
@@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 
-	if (dpu_kms_is_suspend_state(crtc->dev))
-		_dpu_crtc_set_suspend(crtc, true);
-
 	/* Disable/save vblank irq handling */
 	drm_crtc_vblank_off(crtc);
 
-	mutex_lock(&dpu_crtc->crtc_lock);
+	drm_for_each_encoder_mask(encoder, crtc->dev,
+				  old_crtc_state->encoder_mask)
+		dpu_encoder_assign_crtc(encoder, NULL);
 
 	/* wait for frame_event_done completion */
 	if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 				atomic_read(&dpu_crtc->frame_pending));
 
 	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
-	if (dpu_crtc->enabled && !dpu_crtc->suspend &&
-			dpu_crtc->vblank_requested) {
-		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
-	}
 	dpu_crtc->enabled = false;
 
 	if (atomic_read(&dpu_crtc->frame_pending)) {
@@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 
 	dpu_core_perf_crtc_update(crtc, 0, true);
 
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc != crtc)
-			continue;
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
 		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
-	}
-
-	if (dpu_crtc->power_event)
-		dpu_power_handle_unregister_event(dpu_crtc->phandle,
-				dpu_crtc->power_event);
 
 	memset(cstate->mixers, 0, sizeof(cstate->mixers));
 	cstate->num_mixers = 0;
@@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 	cstate->bw_control = false;
 	cstate->bw_split_vote = false;
 
-	mutex_unlock(&dpu_crtc->crtc_lock);
-
 	if (crtc->state->event && !crtc->state->active) {
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
 		crtc->state->event = NULL;
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 	}
+
+	pm_runtime_put_sync(crtc->dev->dev);
 }
 
 static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
 	}
 	priv = crtc->dev->dev_private;
 
+	pm_runtime_get_sync(crtc->dev->dev);
+
 	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 	dpu_crtc = to_dpu_crtc(crtc);
 
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc != crtc)
-			continue;
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
 		dpu_encoder_register_frame_event_callback(encoder,
 				dpu_crtc_frame_event_cb, (void *)crtc);
-	}
 
-	mutex_lock(&dpu_crtc->crtc_lock);
 	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
-	if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
-			dpu_crtc->vblank_requested) {
-		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
-	}
 	dpu_crtc->enabled = true;
 
-	mutex_unlock(&dpu_crtc->crtc_lock);
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		dpu_encoder_assign_crtc(encoder, crtc);
 
 	/* Enable/restore vblank irq handling */
 	drm_crtc_vblank_on(crtc);
-
-	dpu_crtc->power_event = dpu_power_handle_register_event(
-		dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
-		dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
 }
 
 struct plane_state {
@@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
-	mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
+	mixer_width = mode->hdisplay / cstate->num_mixers;
 
 	_dpu_crtc_setup_lm_bounds(crtc, state);
 
@@ -1289,21 +1134,32 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
-	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct drm_encoder *enc;
 
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-	dpu_crtc = to_dpu_crtc(crtc);
-
-	mutex_lock(&dpu_crtc->crtc_lock);
 	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
-	if (dpu_crtc->enabled && !dpu_crtc->suspend) {
-		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+
+	/*
+	 * Normally we would iterate through encoder_mask in crtc state to find
+	 * attached encoders. In this case, we might be disabling vblank _after_
+	 * encoder_mask has been cleared.
+	 *
+	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
+	 * disable (which is also after encoder_mask is cleared). So instead of
+	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
+	 * currently assigned to our crtc.
+	 *
+	 * Note also that this function cannot be called while crtc is disabled
+	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
+	 * about the assigned crtcs being inconsistent with the current state
+	 * (which means no need to worry about modeset locks).
+	 */
+	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+					     dpu_crtc);
+
+		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
 	}
-	dpu_crtc->vblank_requested = en;
-	mutex_unlock(&dpu_crtc->crtc_lock);
 
 	return 0;
 }
@@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
 
 	int i, out_width;
 
-	if (!s || !s->private)
-		return -EINVAL;
-
 	dpu_crtc = s->private;
 	crtc = &dpu_crtc->base;
 
 	drm_modeset_lock_all(crtc->dev);
 	cstate = to_dpu_crtc_state(crtc->state);
 
-	mutex_lock(&dpu_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
-	out_width = _dpu_crtc_get_mixer_width(cstate, mode);
+	out_width = mode->hdisplay / cstate->num_mixers;
 
 	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
 				mode->hdisplay, mode->vdisplay);
@@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
 		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
 	}
 
-	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
-	mutex_unlock(&dpu_crtc->crtc_lock);
 	drm_modeset_unlock_all(crtc->dev);
 
 	return 0;
@@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
 	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
 	seq_printf(s, "core_clk_rate: %llu\n",
 			dpu_crtc->cur_perf.core_clk_rate);
-	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
-			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		seq_printf(s, "bw_ctl[%s]: %llu\n",
-				dpu_power_handle_get_dbus_name(i),
+	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
+		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
 				dpu_crtc->cur_perf.bw_ctl[i]);
-		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
-				dpu_power_handle_get_dbus_name(i),
+		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
 				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
 	}
 
@@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
 
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
-	struct dpu_crtc *dpu_crtc;
-	struct dpu_kms *dpu_kms;
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
 	static const struct file_operations debugfs_status_fops = {
 		.open =		_dpu_debugfs_status_open,
@@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 		.release =	single_release,
 	};
 
-	if (!crtc)
-		return -EINVAL;
-	dpu_crtc = to_dpu_crtc(crtc);
-
-	dpu_kms = _dpu_crtc_get_kms(crtc);
-
 	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
 			crtc->dev->primary->debugfs_root);
 	if (!dpu_crtc->debugfs_root)
@@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 
 	return 0;
 }
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-	struct dpu_crtc *dpu_crtc;
-
-	if (!crtc)
-		return;
-	dpu_crtc = to_dpu_crtc(crtc);
-	debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
 #else
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
 	return 0;
 }
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
 #endif /* CONFIG_DEBUG_FS */
 
 static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
 
 static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
 {
-	_dpu_crtc_destroy_debugfs(crtc);
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+	debugfs_remove_recursive(dpu_crtc->debugfs_root);
 }
 
 static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
 };
 
 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
-	.disable = dpu_crtc_disable,
+	.atomic_disable = dpu_crtc_disable,
 	.atomic_enable = dpu_crtc_enable,
 	.atomic_check = dpu_crtc_atomic_check,
 	.atomic_begin = dpu_crtc_atomic_begin,
@@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 	crtc = &dpu_crtc->base;
 	crtc->dev = dev;
 
-	mutex_init(&dpu_crtc->crtc_lock);
 	spin_lock_init(&dpu_crtc->spin_lock);
 	atomic_set(&dpu_crtc->frame_pending, 0);
 
@@ -1601,8 +1428,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 	/* initialize event handling */
 	spin_lock_init(&dpu_crtc->event_lock);
 
-	dpu_crtc->phandle = &kms->phandle;
-
 	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
 	return crtc;
 }
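
[Editor's note] The recurring change through dpu_crtc.c above is the encoder iteration idiom: instead of walking dev->mode_config.encoder_list and comparing encoder->crtc, the code visits only encoders recorded in crtc->state->encoder_mask, with the crtc lock held so the mask is stable. A hedged sketch of the pattern, where the per-encoder hook is hypothetical:

	static void example_for_each_bound_encoder(struct drm_crtc *crtc)
	{
		struct drm_encoder *encoder;

		/* encoder_mask lives in atomic state; keep crtc->mutex held */
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc->state->encoder_mask)
			example_handle_encoder(encoder);	/* hypothetical hook */
	}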
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 3723b48..dbfb38a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
  * @vblank_cb_count : count of vblank callback since last reset
  * @play_count    : frame count between crtc enable and disable
  * @vblank_cb_time  : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend         : whether or not a suspend operation is in progress
  * @enabled       : whether the DPU CRTC is currently enabled. updated in the
  *                  commit-thread, not state-swap time which is earlier, so
  *                  safe to make decisions on during VBLANK on/off work
@@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
  * @dirty_list    : list of color processing features are dirty
  * @ad_dirty: list containing ad properties that are dirty
  * @ad_active: list containing ad properties that are active
- * @crtc_lock     : crtc lock around create, destroy and access.
  * @frame_pending : Whether or not an update is pending
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
@@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
  * @event_worker  : Event worker queue
  * @event_lock    : Spinlock around event handling code
  * @phandle: Pointer to power handler
- * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
  */
 struct dpu_crtc {
@@ -168,8 +164,6 @@ struct dpu_crtc {
 	u32 vblank_cb_count;
 	u64 play_count;
 	ktime_t vblank_cb_time;
-	bool vblank_requested;
-	bool suspend;
 	bool enabled;
 
 	struct list_head feature_list;
@@ -178,8 +172,6 @@ struct dpu_crtc {
 	struct list_head ad_dirty;
 	struct list_head ad_active;
 
-	struct mutex crtc_lock;
-
 	atomic_t frame_pending;
 	struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
 	struct list_head frame_event_list;
@@ -189,9 +181,6 @@ struct dpu_crtc {
 	/* for handling internal event thread */
 	spinlock_t event_lock;
 
-	struct dpu_power_handle *phandle;
-	struct dpu_power_event *power_event;
-
 	struct dpu_core_perf_params cur_perf;
 
 	struct dpu_crtc_smmu_state_data smmu_state;
@@ -238,41 +227,12 @@ struct dpu_crtc_state {
 	container_of(x, struct dpu_crtc_state, base)
 
 /**
- * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
- * @cstate: Pointer to dpu crtc state
- * @Return: true - has two mixers, false - has one mixer
- */
-static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
-{
-	return cstate->num_mixers == CRTC_DUAL_MIXERS;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
-		struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
-	if (!dpu_crtc || !cstate || !mode)
-		return 0;
-
-	return mode->vdisplay;
-}
-
-/**
  * dpu_crtc_frame_pending - return the number of pending frames
  * @crtc: Pointer to drm crtc object
  */
 static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
 {
-	struct dpu_crtc *dpu_crtc;
-
-	if (!crtc)
-		return -EINVAL;
-
-	dpu_crtc = to_dpu_crtc(crtc);
-	return atomic_read(&dpu_crtc->frame_pending);
+	return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
 }
 
 /**
@@ -283,10 +243,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
 
 /**
- * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
  * @crtc: Pointer to drm crtc object
  */
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @async: true if the commit is asynchronous, false otherwise
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
 
 /**
  * dpu_crtc_complete_commit - callback signalling completion of current commit
@@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
 static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
 						struct drm_crtc *crtc)
 {
-	struct dpu_crtc_state *cstate =
-			crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
-	if (!cstate)
-		return NRT_CLIENT;
-
-	return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
-	return crtc ? crtc->enabled : false;
+	return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
 }
 
 #endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644
index ae2aee7..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU	DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT	DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN	80
-
-#define DBGBUS_FLAGS_DSPP	BIT(0)
-#define DBGBUS_DSPP_STATUS	0x34C
-
-#define DBGBUS_NAME_DPU		"dpu"
-#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0	0x188
-#define DBGBUS_AXI_INTF	0x194
-#define DBGBUS_SSPP1	0x298
-#define DBGBUS_DSPP	0x348
-#define DBGBUS_PERIPH	0x418
-
-#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON			0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
-#define MMSS_VBIF_TEST_BUS_OUT		0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR		0x190
-#define MMSS_VBIF_SRC_ERR		0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
-#define MMSS_VBIF_ERR_INFO		0X1a0
-#define MMSS_VBIF_ERR_INFO_1		0x1a4
-#define MMSS_VBIF_CLIENT_NUM		14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- *	may sub-ranges: sub-ranges are used for dumping
- *	or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len:  buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
-	struct list_head reg_base_head;
-	char name[REG_BASE_NAME_LEN];
-	void __iomem *base;
-	size_t off;
-	size_t cnt;
-	size_t max_offset;
-	char *buf;
-	size_t buf_len;
-	void (*cb)(void *ptr);
-	void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
-	u32 wr_addr;
-	u32 block_id;
-	u32 test_id;
-	void (*analyzer)(void __iomem *mem_base,
-				struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
-	u32 disable_bus_addr;
-	u32 block_bus_addr;
-	u32 bit_offset;
-	u32 block_cnt;
-	u32 test_pnt_start;
-	u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
-	char *name;
-	u32 enable_mask;
-	bool include_in_deferred_work;
-	u32 flags;
-	u32 entries_size;
-	u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
-	struct dpu_dbg_debug_bus_common cmn;
-	struct dpu_debug_bus_entry *entries;
-	u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
-	struct dpu_dbg_debug_bus_common cmn;
-	struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
-	struct list_head reg_base_list;
-	struct device *dev;
-
-	struct work_struct dump_work;
-
-	struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
-	struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
-		struct dpu_debug_bus_entry *entry, u32 val)
-{
-	dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
-		struct dpu_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & 0xFFF000))
-		return;
-
-	dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
-		struct dpu_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & BIT(15)))
-		return;
-
-	dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
-		struct dpu_debug_bus_entry *entry, u32 val)
-{
-	if (!(val & BIT(15)))
-		return;
-
-	dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
-			entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
-	/* Unpack 0 sspp 0*/
-	{ DBGBUS_SSPP0, 50, 2 },
-	{ DBGBUS_SSPP0, 60, 2 },
-	{ DBGBUS_SSPP0, 70, 2 },
-	{ DBGBUS_SSPP0, 85, 2 },
-
-	/* Upack 0 sspp 1*/
-	{ DBGBUS_SSPP1, 50, 2 },
-	{ DBGBUS_SSPP1, 60, 2 },
-	{ DBGBUS_SSPP1, 70, 2 },
-	{ DBGBUS_SSPP1, 85, 2 },
-
-	/* scheduler */
-	{ DBGBUS_DSPP, 130, 0 },
-	{ DBGBUS_DSPP, 130, 1 },
-	{ DBGBUS_DSPP, 130, 2 },
-	{ DBGBUS_DSPP, 130, 3 },
-	{ DBGBUS_DSPP, 130, 4 },
-	{ DBGBUS_DSPP, 130, 5 },
-
-	/* qseed */
-	{ DBGBUS_SSPP0, 6, 0},
-	{ DBGBUS_SSPP0, 6, 1},
-	{ DBGBUS_SSPP0, 26, 0},
-	{ DBGBUS_SSPP0, 26, 1},
-	{ DBGBUS_SSPP1, 6, 0},
-	{ DBGBUS_SSPP1, 6, 1},
-	{ DBGBUS_SSPP1, 26, 0},
-	{ DBGBUS_SSPP1, 26, 1},
-
-	/* scale */
-	{ DBGBUS_SSPP0, 16, 0},
-	{ DBGBUS_SSPP0, 16, 1},
-	{ DBGBUS_SSPP0, 36, 0},
-	{ DBGBUS_SSPP0, 36, 1},
-	{ DBGBUS_SSPP1, 16, 0},
-	{ DBGBUS_SSPP1, 16, 1},
-	{ DBGBUS_SSPP1, 36, 0},
-	{ DBGBUS_SSPP1, 36, 1},
-
-	/* fetch sspp0 */
-
-	/* vig 0 */
-	{ DBGBUS_SSPP0, 0, 0 },
-	{ DBGBUS_SSPP0, 0, 1 },
-	{ DBGBUS_SSPP0, 0, 2 },
-	{ DBGBUS_SSPP0, 0, 3 },
-	{ DBGBUS_SSPP0, 0, 4 },
-	{ DBGBUS_SSPP0, 0, 5 },
-	{ DBGBUS_SSPP0, 0, 6 },
-	{ DBGBUS_SSPP0, 0, 7 },
-
-	{ DBGBUS_SSPP0, 1, 0 },
-	{ DBGBUS_SSPP0, 1, 1 },
-	{ DBGBUS_SSPP0, 1, 2 },
-	{ DBGBUS_SSPP0, 1, 3 },
-	{ DBGBUS_SSPP0, 1, 4 },
-	{ DBGBUS_SSPP0, 1, 5 },
-	{ DBGBUS_SSPP0, 1, 6 },
-	{ DBGBUS_SSPP0, 1, 7 },
-
-	{ DBGBUS_SSPP0, 2, 0 },
-	{ DBGBUS_SSPP0, 2, 1 },
-	{ DBGBUS_SSPP0, 2, 2 },
-	{ DBGBUS_SSPP0, 2, 3 },
-	{ DBGBUS_SSPP0, 2, 4 },
-	{ DBGBUS_SSPP0, 2, 5 },
-	{ DBGBUS_SSPP0, 2, 6 },
-	{ DBGBUS_SSPP0, 2, 7 },
-
-	{ DBGBUS_SSPP0, 4, 0 },
-	{ DBGBUS_SSPP0, 4, 1 },
-	{ DBGBUS_SSPP0, 4, 2 },
-	{ DBGBUS_SSPP0, 4, 3 },
-	{ DBGBUS_SSPP0, 4, 4 },
-	{ DBGBUS_SSPP0, 4, 5 },
-	{ DBGBUS_SSPP0, 4, 6 },
-	{ DBGBUS_SSPP0, 4, 7 },
-
-	{ DBGBUS_SSPP0, 5, 0 },
-	{ DBGBUS_SSPP0, 5, 1 },
-	{ DBGBUS_SSPP0, 5, 2 },
-	{ DBGBUS_SSPP0, 5, 3 },
-	{ DBGBUS_SSPP0, 5, 4 },
-	{ DBGBUS_SSPP0, 5, 5 },
-	{ DBGBUS_SSPP0, 5, 6 },
-	{ DBGBUS_SSPP0, 5, 7 },
-
-	/* vig 2 */
-	{ DBGBUS_SSPP0, 20, 0 },
-	{ DBGBUS_SSPP0, 20, 1 },
-	{ DBGBUS_SSPP0, 20, 2 },
-	{ DBGBUS_SSPP0, 20, 3 },
-	{ DBGBUS_SSPP0, 20, 4 },
-	{ DBGBUS_SSPP0, 20, 5 },
-	{ DBGBUS_SSPP0, 20, 6 },
-	{ DBGBUS_SSPP0, 20, 7 },
-
-	{ DBGBUS_SSPP0, 21, 0 },
-	{ DBGBUS_SSPP0, 21, 1 },
-	{ DBGBUS_SSPP0, 21, 2 },
-	{ DBGBUS_SSPP0, 21, 3 },
-	{ DBGBUS_SSPP0, 21, 4 },
-	{ DBGBUS_SSPP0, 21, 5 },
-	{ DBGBUS_SSPP0, 21, 6 },
-	{ DBGBUS_SSPP0, 21, 7 },
-
-	{ DBGBUS_SSPP0, 22, 0 },
-	{ DBGBUS_SSPP0, 22, 1 },
-	{ DBGBUS_SSPP0, 22, 2 },
-	{ DBGBUS_SSPP0, 22, 3 },
-	{ DBGBUS_SSPP0, 22, 4 },
-	{ DBGBUS_SSPP0, 22, 5 },
-	{ DBGBUS_SSPP0, 22, 6 },
-	{ DBGBUS_SSPP0, 22, 7 },
-
-	{ DBGBUS_SSPP0, 24, 0 },
-	{ DBGBUS_SSPP0, 24, 1 },
-	{ DBGBUS_SSPP0, 24, 2 },
-	{ DBGBUS_SSPP0, 24, 3 },
-	{ DBGBUS_SSPP0, 24, 4 },
-	{ DBGBUS_SSPP0, 24, 5 },
-	{ DBGBUS_SSPP0, 24, 6 },
-	{ DBGBUS_SSPP0, 24, 7 },
-
-	{ DBGBUS_SSPP0, 25, 0 },
-	{ DBGBUS_SSPP0, 25, 1 },
-	{ DBGBUS_SSPP0, 25, 2 },
-	{ DBGBUS_SSPP0, 25, 3 },
-	{ DBGBUS_SSPP0, 25, 4 },
-	{ DBGBUS_SSPP0, 25, 5 },
-	{ DBGBUS_SSPP0, 25, 6 },
-	{ DBGBUS_SSPP0, 25, 7 },
-
-	/* dma 2 */
-	{ DBGBUS_SSPP0, 30, 0 },
-	{ DBGBUS_SSPP0, 30, 1 },
-	{ DBGBUS_SSPP0, 30, 2 },
-	{ DBGBUS_SSPP0, 30, 3 },
-	{ DBGBUS_SSPP0, 30, 4 },
-	{ DBGBUS_SSPP0, 30, 5 },
-	{ DBGBUS_SSPP0, 30, 6 },
-	{ DBGBUS_SSPP0, 30, 7 },
-
-	{ DBGBUS_SSPP0, 31, 0 },
-	{ DBGBUS_SSPP0, 31, 1 },
-	{ DBGBUS_SSPP0, 31, 2 },
-	{ DBGBUS_SSPP0, 31, 3 },
-	{ DBGBUS_SSPP0, 31, 4 },
-	{ DBGBUS_SSPP0, 31, 5 },
-	{ DBGBUS_SSPP0, 31, 6 },
-	{ DBGBUS_SSPP0, 31, 7 },
-
-	{ DBGBUS_SSPP0, 32, 0 },
-	{ DBGBUS_SSPP0, 32, 1 },
-	{ DBGBUS_SSPP0, 32, 2 },
-	{ DBGBUS_SSPP0, 32, 3 },
-	{ DBGBUS_SSPP0, 32, 4 },
-	{ DBGBUS_SSPP0, 32, 5 },
-	{ DBGBUS_SSPP0, 32, 6 },
-	{ DBGBUS_SSPP0, 32, 7 },
-
-	{ DBGBUS_SSPP0, 33, 0 },
-	{ DBGBUS_SSPP0, 33, 1 },
-	{ DBGBUS_SSPP0, 33, 2 },
-	{ DBGBUS_SSPP0, 33, 3 },
-	{ DBGBUS_SSPP0, 33, 4 },
-	{ DBGBUS_SSPP0, 33, 5 },
-	{ DBGBUS_SSPP0, 33, 6 },
-	{ DBGBUS_SSPP0, 33, 7 },
-
-	{ DBGBUS_SSPP0, 34, 0 },
-	{ DBGBUS_SSPP0, 34, 1 },
-	{ DBGBUS_SSPP0, 34, 2 },
-	{ DBGBUS_SSPP0, 34, 3 },
-	{ DBGBUS_SSPP0, 34, 4 },
-	{ DBGBUS_SSPP0, 34, 5 },
-	{ DBGBUS_SSPP0, 34, 6 },
-	{ DBGBUS_SSPP0, 34, 7 },
-
-	{ DBGBUS_SSPP0, 35, 0 },
-	{ DBGBUS_SSPP0, 35, 1 },
-	{ DBGBUS_SSPP0, 35, 2 },
-	{ DBGBUS_SSPP0, 35, 3 },
-
-	/* dma 0 */
-	{ DBGBUS_SSPP0, 40, 0 },
-	{ DBGBUS_SSPP0, 40, 1 },
-	{ DBGBUS_SSPP0, 40, 2 },
-	{ DBGBUS_SSPP0, 40, 3 },
-	{ DBGBUS_SSPP0, 40, 4 },
-	{ DBGBUS_SSPP0, 40, 5 },
-	{ DBGBUS_SSPP0, 40, 6 },
-	{ DBGBUS_SSPP0, 40, 7 },
-
-	{ DBGBUS_SSPP0, 41, 0 },
-	{ DBGBUS_SSPP0, 41, 1 },
-	{ DBGBUS_SSPP0, 41, 2 },
-	{ DBGBUS_SSPP0, 41, 3 },
-	{ DBGBUS_SSPP0, 41, 4 },
-	{ DBGBUS_SSPP0, 41, 5 },
-	{ DBGBUS_SSPP0, 41, 6 },
-	{ DBGBUS_SSPP0, 41, 7 },
-
-	{ DBGBUS_SSPP0, 42, 0 },
-	{ DBGBUS_SSPP0, 42, 1 },
-	{ DBGBUS_SSPP0, 42, 2 },
-	{ DBGBUS_SSPP0, 42, 3 },
-	{ DBGBUS_SSPP0, 42, 4 },
-	{ DBGBUS_SSPP0, 42, 5 },
-	{ DBGBUS_SSPP0, 42, 6 },
-	{ DBGBUS_SSPP0, 42, 7 },
-
-	{ DBGBUS_SSPP0, 44, 0 },
-	{ DBGBUS_SSPP0, 44, 1 },
-	{ DBGBUS_SSPP0, 44, 2 },
-	{ DBGBUS_SSPP0, 44, 3 },
-	{ DBGBUS_SSPP0, 44, 4 },
-	{ DBGBUS_SSPP0, 44, 5 },
-	{ DBGBUS_SSPP0, 44, 6 },
-	{ DBGBUS_SSPP0, 44, 7 },
-
-	{ DBGBUS_SSPP0, 45, 0 },
-	{ DBGBUS_SSPP0, 45, 1 },
-	{ DBGBUS_SSPP0, 45, 2 },
-	{ DBGBUS_SSPP0, 45, 3 },
-	{ DBGBUS_SSPP0, 45, 4 },
-	{ DBGBUS_SSPP0, 45, 5 },
-	{ DBGBUS_SSPP0, 45, 6 },
-	{ DBGBUS_SSPP0, 45, 7 },
-
-	/* fetch sspp1 */
-	/* vig 1 */
-	{ DBGBUS_SSPP1, 0, 0 },
-	{ DBGBUS_SSPP1, 0, 1 },
-	{ DBGBUS_SSPP1, 0, 2 },
-	{ DBGBUS_SSPP1, 0, 3 },
-	{ DBGBUS_SSPP1, 0, 4 },
-	{ DBGBUS_SSPP1, 0, 5 },
-	{ DBGBUS_SSPP1, 0, 6 },
-	{ DBGBUS_SSPP1, 0, 7 },
-
-	{ DBGBUS_SSPP1, 1, 0 },
-	{ DBGBUS_SSPP1, 1, 1 },
-	{ DBGBUS_SSPP1, 1, 2 },
-	{ DBGBUS_SSPP1, 1, 3 },
-	{ DBGBUS_SSPP1, 1, 4 },
-	{ DBGBUS_SSPP1, 1, 5 },
-	{ DBGBUS_SSPP1, 1, 6 },
-	{ DBGBUS_SSPP1, 1, 7 },
-
-	{ DBGBUS_SSPP1, 2, 0 },
-	{ DBGBUS_SSPP1, 2, 1 },
-	{ DBGBUS_SSPP1, 2, 2 },
-	{ DBGBUS_SSPP1, 2, 3 },
-	{ DBGBUS_SSPP1, 2, 4 },
-	{ DBGBUS_SSPP1, 2, 5 },
-	{ DBGBUS_SSPP1, 2, 6 },
-	{ DBGBUS_SSPP1, 2, 7 },
-
-	{ DBGBUS_SSPP1, 4, 0 },
-	{ DBGBUS_SSPP1, 4, 1 },
-	{ DBGBUS_SSPP1, 4, 2 },
-	{ DBGBUS_SSPP1, 4, 3 },
-	{ DBGBUS_SSPP1, 4, 4 },
-	{ DBGBUS_SSPP1, 4, 5 },
-	{ DBGBUS_SSPP1, 4, 6 },
-	{ DBGBUS_SSPP1, 4, 7 },
-
-	{ DBGBUS_SSPP1, 5, 0 },
-	{ DBGBUS_SSPP1, 5, 1 },
-	{ DBGBUS_SSPP1, 5, 2 },
-	{ DBGBUS_SSPP1, 5, 3 },
-	{ DBGBUS_SSPP1, 5, 4 },
-	{ DBGBUS_SSPP1, 5, 5 },
-	{ DBGBUS_SSPP1, 5, 6 },
-	{ DBGBUS_SSPP1, 5, 7 },
-
-	/* vig 3 */
-	{ DBGBUS_SSPP1, 20, 0 },
-	{ DBGBUS_SSPP1, 20, 1 },
-	{ DBGBUS_SSPP1, 20, 2 },
-	{ DBGBUS_SSPP1, 20, 3 },
-	{ DBGBUS_SSPP1, 20, 4 },
-	{ DBGBUS_SSPP1, 20, 5 },
-	{ DBGBUS_SSPP1, 20, 6 },
-	{ DBGBUS_SSPP1, 20, 7 },
-
-	{ DBGBUS_SSPP1, 21, 0 },
-	{ DBGBUS_SSPP1, 21, 1 },
-	{ DBGBUS_SSPP1, 21, 2 },
-	{ DBGBUS_SSPP1, 21, 3 },
-	{ DBGBUS_SSPP1, 21, 4 },
-	{ DBGBUS_SSPP1, 21, 5 },
-	{ DBGBUS_SSPP1, 21, 6 },
-	{ DBGBUS_SSPP1, 21, 7 },
-
-	{ DBGBUS_SSPP1, 22, 0 },
-	{ DBGBUS_SSPP1, 22, 1 },
-	{ DBGBUS_SSPP1, 22, 2 },
-	{ DBGBUS_SSPP1, 22, 3 },
-	{ DBGBUS_SSPP1, 22, 4 },
-	{ DBGBUS_SSPP1, 22, 5 },
-	{ DBGBUS_SSPP1, 22, 6 },
-	{ DBGBUS_SSPP1, 22, 7 },
-
-	{ DBGBUS_SSPP1, 24, 0 },
-	{ DBGBUS_SSPP1, 24, 1 },
-	{ DBGBUS_SSPP1, 24, 2 },
-	{ DBGBUS_SSPP1, 24, 3 },
-	{ DBGBUS_SSPP1, 24, 4 },
-	{ DBGBUS_SSPP1, 24, 5 },
-	{ DBGBUS_SSPP1, 24, 6 },
-	{ DBGBUS_SSPP1, 24, 7 },
-
-	{ DBGBUS_SSPP1, 25, 0 },
-	{ DBGBUS_SSPP1, 25, 1 },
-	{ DBGBUS_SSPP1, 25, 2 },
-	{ DBGBUS_SSPP1, 25, 3 },
-	{ DBGBUS_SSPP1, 25, 4 },
-	{ DBGBUS_SSPP1, 25, 5 },
-	{ DBGBUS_SSPP1, 25, 6 },
-	{ DBGBUS_SSPP1, 25, 7 },
-
-	/* dma 3 */
-	{ DBGBUS_SSPP1, 30, 0 },
-	{ DBGBUS_SSPP1, 30, 1 },
-	{ DBGBUS_SSPP1, 30, 2 },
-	{ DBGBUS_SSPP1, 30, 3 },
-	{ DBGBUS_SSPP1, 30, 4 },
-	{ DBGBUS_SSPP1, 30, 5 },
-	{ DBGBUS_SSPP1, 30, 6 },
-	{ DBGBUS_SSPP1, 30, 7 },
-
-	{ DBGBUS_SSPP1, 31, 0 },
-	{ DBGBUS_SSPP1, 31, 1 },
-	{ DBGBUS_SSPP1, 31, 2 },
-	{ DBGBUS_SSPP1, 31, 3 },
-	{ DBGBUS_SSPP1, 31, 4 },
-	{ DBGBUS_SSPP1, 31, 5 },
-	{ DBGBUS_SSPP1, 31, 6 },
-	{ DBGBUS_SSPP1, 31, 7 },
-
-	{ DBGBUS_SSPP1, 32, 0 },
-	{ DBGBUS_SSPP1, 32, 1 },
-	{ DBGBUS_SSPP1, 32, 2 },
-	{ DBGBUS_SSPP1, 32, 3 },
-	{ DBGBUS_SSPP1, 32, 4 },
-	{ DBGBUS_SSPP1, 32, 5 },
-	{ DBGBUS_SSPP1, 32, 6 },
-	{ DBGBUS_SSPP1, 32, 7 },
-
-	{ DBGBUS_SSPP1, 33, 0 },
-	{ DBGBUS_SSPP1, 33, 1 },
-	{ DBGBUS_SSPP1, 33, 2 },
-	{ DBGBUS_SSPP1, 33, 3 },
-	{ DBGBUS_SSPP1, 33, 4 },
-	{ DBGBUS_SSPP1, 33, 5 },
-	{ DBGBUS_SSPP1, 33, 6 },
-	{ DBGBUS_SSPP1, 33, 7 },
-
-	{ DBGBUS_SSPP1, 34, 0 },
-	{ DBGBUS_SSPP1, 34, 1 },
-	{ DBGBUS_SSPP1, 34, 2 },
-	{ DBGBUS_SSPP1, 34, 3 },
-	{ DBGBUS_SSPP1, 34, 4 },
-	{ DBGBUS_SSPP1, 34, 5 },
-	{ DBGBUS_SSPP1, 34, 6 },
-	{ DBGBUS_SSPP1, 34, 7 },
-
-	{ DBGBUS_SSPP1, 35, 0 },
-	{ DBGBUS_SSPP1, 35, 1 },
-	{ DBGBUS_SSPP1, 35, 2 },
-
-	/* dma 1 */
-	{ DBGBUS_SSPP1, 40, 0 },
-	{ DBGBUS_SSPP1, 40, 1 },
-	{ DBGBUS_SSPP1, 40, 2 },
-	{ DBGBUS_SSPP1, 40, 3 },
-	{ DBGBUS_SSPP1, 40, 4 },
-	{ DBGBUS_SSPP1, 40, 5 },
-	{ DBGBUS_SSPP1, 40, 6 },
-	{ DBGBUS_SSPP1, 40, 7 },
-
-	{ DBGBUS_SSPP1, 41, 0 },
-	{ DBGBUS_SSPP1, 41, 1 },
-	{ DBGBUS_SSPP1, 41, 2 },
-	{ DBGBUS_SSPP1, 41, 3 },
-	{ DBGBUS_SSPP1, 41, 4 },
-	{ DBGBUS_SSPP1, 41, 5 },
-	{ DBGBUS_SSPP1, 41, 6 },
-	{ DBGBUS_SSPP1, 41, 7 },
-
-	{ DBGBUS_SSPP1, 42, 0 },
-	{ DBGBUS_SSPP1, 42, 1 },
-	{ DBGBUS_SSPP1, 42, 2 },
-	{ DBGBUS_SSPP1, 42, 3 },
-	{ DBGBUS_SSPP1, 42, 4 },
-	{ DBGBUS_SSPP1, 42, 5 },
-	{ DBGBUS_SSPP1, 42, 6 },
-	{ DBGBUS_SSPP1, 42, 7 },
-
-	{ DBGBUS_SSPP1, 44, 0 },
-	{ DBGBUS_SSPP1, 44, 1 },
-	{ DBGBUS_SSPP1, 44, 2 },
-	{ DBGBUS_SSPP1, 44, 3 },
-	{ DBGBUS_SSPP1, 44, 4 },
-	{ DBGBUS_SSPP1, 44, 5 },
-	{ DBGBUS_SSPP1, 44, 6 },
-	{ DBGBUS_SSPP1, 44, 7 },
-
-	{ DBGBUS_SSPP1, 45, 0 },
-	{ DBGBUS_SSPP1, 45, 1 },
-	{ DBGBUS_SSPP1, 45, 2 },
-	{ DBGBUS_SSPP1, 45, 3 },
-	{ DBGBUS_SSPP1, 45, 4 },
-	{ DBGBUS_SSPP1, 45, 5 },
-	{ DBGBUS_SSPP1, 45, 6 },
-	{ DBGBUS_SSPP1, 45, 7 },
-
-	/* cursor 1 */
-	{ DBGBUS_SSPP1, 80, 0 },
-	{ DBGBUS_SSPP1, 80, 1 },
-	{ DBGBUS_SSPP1, 80, 2 },
-	{ DBGBUS_SSPP1, 80, 3 },
-	{ DBGBUS_SSPP1, 80, 4 },
-	{ DBGBUS_SSPP1, 80, 5 },
-	{ DBGBUS_SSPP1, 80, 6 },
-	{ DBGBUS_SSPP1, 80, 7 },
-
-	{ DBGBUS_SSPP1, 81, 0 },
-	{ DBGBUS_SSPP1, 81, 1 },
-	{ DBGBUS_SSPP1, 81, 2 },
-	{ DBGBUS_SSPP1, 81, 3 },
-	{ DBGBUS_SSPP1, 81, 4 },
-	{ DBGBUS_SSPP1, 81, 5 },
-	{ DBGBUS_SSPP1, 81, 6 },
-	{ DBGBUS_SSPP1, 81, 7 },
-
-	{ DBGBUS_SSPP1, 82, 0 },
-	{ DBGBUS_SSPP1, 82, 1 },
-	{ DBGBUS_SSPP1, 82, 2 },
-	{ DBGBUS_SSPP1, 82, 3 },
-	{ DBGBUS_SSPP1, 82, 4 },
-	{ DBGBUS_SSPP1, 82, 5 },
-	{ DBGBUS_SSPP1, 82, 6 },
-	{ DBGBUS_SSPP1, 82, 7 },
-
-	{ DBGBUS_SSPP1, 83, 0 },
-	{ DBGBUS_SSPP1, 83, 1 },
-	{ DBGBUS_SSPP1, 83, 2 },
-	{ DBGBUS_SSPP1, 83, 3 },
-	{ DBGBUS_SSPP1, 83, 4 },
-	{ DBGBUS_SSPP1, 83, 5 },
-	{ DBGBUS_SSPP1, 83, 6 },
-	{ DBGBUS_SSPP1, 83, 7 },
-
-	{ DBGBUS_SSPP1, 84, 0 },
-	{ DBGBUS_SSPP1, 84, 1 },
-	{ DBGBUS_SSPP1, 84, 2 },
-	{ DBGBUS_SSPP1, 84, 3 },
-	{ DBGBUS_SSPP1, 84, 4 },
-	{ DBGBUS_SSPP1, 84, 5 },
-	{ DBGBUS_SSPP1, 84, 6 },
-	{ DBGBUS_SSPP1, 84, 7 },
-
-	/* dspp */
-	{ DBGBUS_DSPP, 13, 0 },
-	{ DBGBUS_DSPP, 19, 0 },
-	{ DBGBUS_DSPP, 14, 0 },
-	{ DBGBUS_DSPP, 14, 1 },
-	{ DBGBUS_DSPP, 14, 3 },
-	{ DBGBUS_DSPP, 20, 0 },
-	{ DBGBUS_DSPP, 20, 1 },
-	{ DBGBUS_DSPP, 20, 3 },
-
-	/* ppb_0 */
-	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
-	/* ppb_1 */
-	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
-	/* lm_lut */
-	{ DBGBUS_DSPP, 109, 0 },
-	{ DBGBUS_DSPP, 105, 0 },
-	{ DBGBUS_DSPP, 103, 0 },
-
-	/* tear-check */
-	{ DBGBUS_PERIPH, 63, 0 },
-	{ DBGBUS_PERIPH, 64, 0 },
-	{ DBGBUS_PERIPH, 65, 0 },
-	{ DBGBUS_PERIPH, 73, 0 },
-	{ DBGBUS_PERIPH, 74, 0 },
-
-	/* crossbar */
-	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
-	/* rotator */
-	{ DBGBUS_DSPP, 9, 0},
-
-	/* blend */
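-	/*
-	 * Each layer mixer below exposes eight test points; only test
-	 * point 7 carries the _dpu_debug_bus_lm_dump() analyzer callback.
-	 */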
-	/* LM0 */
-	{ DBGBUS_DSPP, 63, 0},
-	{ DBGBUS_DSPP, 63, 1},
-	{ DBGBUS_DSPP, 63, 2},
-	{ DBGBUS_DSPP, 63, 3},
-	{ DBGBUS_DSPP, 63, 4},
-	{ DBGBUS_DSPP, 63, 5},
-	{ DBGBUS_DSPP, 63, 6},
-	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 64, 0},
-	{ DBGBUS_DSPP, 64, 1},
-	{ DBGBUS_DSPP, 64, 2},
-	{ DBGBUS_DSPP, 64, 3},
-	{ DBGBUS_DSPP, 64, 4},
-	{ DBGBUS_DSPP, 64, 5},
-	{ DBGBUS_DSPP, 64, 6},
-	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 65, 0},
-	{ DBGBUS_DSPP, 65, 1},
-	{ DBGBUS_DSPP, 65, 2},
-	{ DBGBUS_DSPP, 65, 3},
-	{ DBGBUS_DSPP, 65, 4},
-	{ DBGBUS_DSPP, 65, 5},
-	{ DBGBUS_DSPP, 65, 6},
-	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 66, 0},
-	{ DBGBUS_DSPP, 66, 1},
-	{ DBGBUS_DSPP, 66, 2},
-	{ DBGBUS_DSPP, 66, 3},
-	{ DBGBUS_DSPP, 66, 4},
-	{ DBGBUS_DSPP, 66, 5},
-	{ DBGBUS_DSPP, 66, 6},
-	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 67, 0},
-	{ DBGBUS_DSPP, 67, 1},
-	{ DBGBUS_DSPP, 67, 2},
-	{ DBGBUS_DSPP, 67, 3},
-	{ DBGBUS_DSPP, 67, 4},
-	{ DBGBUS_DSPP, 67, 5},
-	{ DBGBUS_DSPP, 67, 6},
-	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 68, 0},
-	{ DBGBUS_DSPP, 68, 1},
-	{ DBGBUS_DSPP, 68, 2},
-	{ DBGBUS_DSPP, 68, 3},
-	{ DBGBUS_DSPP, 68, 4},
-	{ DBGBUS_DSPP, 68, 5},
-	{ DBGBUS_DSPP, 68, 6},
-	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 69, 0},
-	{ DBGBUS_DSPP, 69, 1},
-	{ DBGBUS_DSPP, 69, 2},
-	{ DBGBUS_DSPP, 69, 3},
-	{ DBGBUS_DSPP, 69, 4},
-	{ DBGBUS_DSPP, 69, 5},
-	{ DBGBUS_DSPP, 69, 6},
-	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
-	/* LM1 */
-	{ DBGBUS_DSPP, 70, 0},
-	{ DBGBUS_DSPP, 70, 1},
-	{ DBGBUS_DSPP, 70, 2},
-	{ DBGBUS_DSPP, 70, 3},
-	{ DBGBUS_DSPP, 70, 4},
-	{ DBGBUS_DSPP, 70, 5},
-	{ DBGBUS_DSPP, 70, 6},
-	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 71, 0},
-	{ DBGBUS_DSPP, 71, 1},
-	{ DBGBUS_DSPP, 71, 2},
-	{ DBGBUS_DSPP, 71, 3},
-	{ DBGBUS_DSPP, 71, 4},
-	{ DBGBUS_DSPP, 71, 5},
-	{ DBGBUS_DSPP, 71, 6},
-	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 72, 0},
-	{ DBGBUS_DSPP, 72, 1},
-	{ DBGBUS_DSPP, 72, 2},
-	{ DBGBUS_DSPP, 72, 3},
-	{ DBGBUS_DSPP, 72, 4},
-	{ DBGBUS_DSPP, 72, 5},
-	{ DBGBUS_DSPP, 72, 6},
-	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 73, 0},
-	{ DBGBUS_DSPP, 73, 1},
-	{ DBGBUS_DSPP, 73, 2},
-	{ DBGBUS_DSPP, 73, 3},
-	{ DBGBUS_DSPP, 73, 4},
-	{ DBGBUS_DSPP, 73, 5},
-	{ DBGBUS_DSPP, 73, 6},
-	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 74, 0},
-	{ DBGBUS_DSPP, 74, 1},
-	{ DBGBUS_DSPP, 74, 2},
-	{ DBGBUS_DSPP, 74, 3},
-	{ DBGBUS_DSPP, 74, 4},
-	{ DBGBUS_DSPP, 74, 5},
-	{ DBGBUS_DSPP, 74, 6},
-	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 75, 0},
-	{ DBGBUS_DSPP, 75, 1},
-	{ DBGBUS_DSPP, 75, 2},
-	{ DBGBUS_DSPP, 75, 3},
-	{ DBGBUS_DSPP, 75, 4},
-	{ DBGBUS_DSPP, 75, 5},
-	{ DBGBUS_DSPP, 75, 6},
-	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 76, 0},
-	{ DBGBUS_DSPP, 76, 1},
-	{ DBGBUS_DSPP, 76, 2},
-	{ DBGBUS_DSPP, 76, 3},
-	{ DBGBUS_DSPP, 76, 4},
-	{ DBGBUS_DSPP, 76, 5},
-	{ DBGBUS_DSPP, 76, 6},
-	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
-	/* LM2 */
-	{ DBGBUS_DSPP, 77, 0},
-	{ DBGBUS_DSPP, 77, 1},
-	{ DBGBUS_DSPP, 77, 2},
-	{ DBGBUS_DSPP, 77, 3},
-	{ DBGBUS_DSPP, 77, 4},
-	{ DBGBUS_DSPP, 77, 5},
-	{ DBGBUS_DSPP, 77, 6},
-	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 78, 0},
-	{ DBGBUS_DSPP, 78, 1},
-	{ DBGBUS_DSPP, 78, 2},
-	{ DBGBUS_DSPP, 78, 3},
-	{ DBGBUS_DSPP, 78, 4},
-	{ DBGBUS_DSPP, 78, 5},
-	{ DBGBUS_DSPP, 78, 6},
-	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 79, 0},
-	{ DBGBUS_DSPP, 79, 1},
-	{ DBGBUS_DSPP, 79, 2},
-	{ DBGBUS_DSPP, 79, 3},
-	{ DBGBUS_DSPP, 79, 4},
-	{ DBGBUS_DSPP, 79, 5},
-	{ DBGBUS_DSPP, 79, 6},
-	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 80, 0},
-	{ DBGBUS_DSPP, 80, 1},
-	{ DBGBUS_DSPP, 80, 2},
-	{ DBGBUS_DSPP, 80, 3},
-	{ DBGBUS_DSPP, 80, 4},
-	{ DBGBUS_DSPP, 80, 5},
-	{ DBGBUS_DSPP, 80, 6},
-	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 81, 0},
-	{ DBGBUS_DSPP, 81, 1},
-	{ DBGBUS_DSPP, 81, 2},
-	{ DBGBUS_DSPP, 81, 3},
-	{ DBGBUS_DSPP, 81, 4},
-	{ DBGBUS_DSPP, 81, 5},
-	{ DBGBUS_DSPP, 81, 6},
-	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 82, 0},
-	{ DBGBUS_DSPP, 82, 1},
-	{ DBGBUS_DSPP, 82, 2},
-	{ DBGBUS_DSPP, 82, 3},
-	{ DBGBUS_DSPP, 82, 4},
-	{ DBGBUS_DSPP, 82, 5},
-	{ DBGBUS_DSPP, 82, 6},
-	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 83, 0},
-	{ DBGBUS_DSPP, 83, 1},
-	{ DBGBUS_DSPP, 83, 2},
-	{ DBGBUS_DSPP, 83, 3},
-	{ DBGBUS_DSPP, 83, 4},
-	{ DBGBUS_DSPP, 83, 5},
-	{ DBGBUS_DSPP, 83, 6},
-	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
-	/* csc */
-	{ DBGBUS_SSPP0, 7, 0},
-	{ DBGBUS_SSPP0, 7, 1},
-	{ DBGBUS_SSPP0, 27, 0},
-	{ DBGBUS_SSPP0, 27, 1},
-	{ DBGBUS_SSPP1, 7, 0},
-	{ DBGBUS_SSPP1, 7, 1},
-	{ DBGBUS_SSPP1, 27, 0},
-	{ DBGBUS_SSPP1, 27, 1},
-
-	/* pcc */
-	{ DBGBUS_SSPP0, 3,  3},
-	{ DBGBUS_SSPP0, 23, 3},
-	{ DBGBUS_SSPP0, 33, 3},
-	{ DBGBUS_SSPP0, 43, 3},
-	{ DBGBUS_SSPP1, 3,  3},
-	{ DBGBUS_SSPP1, 23, 3},
-	{ DBGBUS_SSPP1, 33, 3},
-	{ DBGBUS_SSPP1, 43, 3},
-
-	/* spa */
-	{ DBGBUS_SSPP0, 8,  0},
-	{ DBGBUS_SSPP0, 28, 0},
-	{ DBGBUS_SSPP1, 8,  0},
-	{ DBGBUS_SSPP1, 28, 0},
-	{ DBGBUS_DSPP, 13, 0},
-	{ DBGBUS_DSPP, 19, 0},
-
-	/* igc */
-	{ DBGBUS_SSPP0, 9,  0},
-	{ DBGBUS_SSPP0, 9,  1},
-	{ DBGBUS_SSPP0, 9,  3},
-	{ DBGBUS_SSPP0, 29, 0},
-	{ DBGBUS_SSPP0, 29, 1},
-	{ DBGBUS_SSPP0, 29, 3},
-	{ DBGBUS_SSPP0, 17, 0},
-	{ DBGBUS_SSPP0, 17, 1},
-	{ DBGBUS_SSPP0, 17, 3},
-	{ DBGBUS_SSPP0, 37, 0},
-	{ DBGBUS_SSPP0, 37, 1},
-	{ DBGBUS_SSPP0, 37, 3},
-	{ DBGBUS_SSPP0, 46, 0},
-	{ DBGBUS_SSPP0, 46, 1},
-	{ DBGBUS_SSPP0, 46, 3},
-
-	{ DBGBUS_SSPP1, 9,  0},
-	{ DBGBUS_SSPP1, 9,  1},
-	{ DBGBUS_SSPP1, 9,  3},
-	{ DBGBUS_SSPP1, 29, 0},
-	{ DBGBUS_SSPP1, 29, 1},
-	{ DBGBUS_SSPP1, 29, 3},
-	{ DBGBUS_SSPP1, 17, 0},
-	{ DBGBUS_SSPP1, 17, 1},
-	{ DBGBUS_SSPP1, 17, 3},
-	{ DBGBUS_SSPP1, 37, 0},
-	{ DBGBUS_SSPP1, 37, 1},
-	{ DBGBUS_SSPP1, 37, 3},
-	{ DBGBUS_SSPP1, 46, 0},
-	{ DBGBUS_SSPP1, 46, 1},
-	{ DBGBUS_SSPP1, 46, 3},
-
-	{ DBGBUS_DSPP, 14, 0},
-	{ DBGBUS_DSPP, 14, 1},
-	{ DBGBUS_DSPP, 14, 3},
-	{ DBGBUS_DSPP, 20, 0},
-	{ DBGBUS_DSPP, 20, 1},
-	{ DBGBUS_DSPP, 20, 3},
-
-	{ DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
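-	/*
-	 * Entries are { wr_addr, block_id, test_id } plus an optional
-	 * analyzer callback, consumed by _dpu_dbg_dump_dpu_dbg_bus() when
-	 * it programs and reads back each test point.
-	 */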
-	/* Unpack 0 sspp 0 */
-	{ DBGBUS_SSPP0, 50, 2 },
-	{ DBGBUS_SSPP0, 60, 2 },
-	{ DBGBUS_SSPP0, 70, 2 },
-
-	/* Unpack 0 sspp 1 */
-	{ DBGBUS_SSPP1, 50, 2 },
-	{ DBGBUS_SSPP1, 60, 2 },
-	{ DBGBUS_SSPP1, 70, 2 },
-
-	/* scheduler */
-	{ DBGBUS_DSPP, 130, 0 },
-	{ DBGBUS_DSPP, 130, 1 },
-	{ DBGBUS_DSPP, 130, 2 },
-	{ DBGBUS_DSPP, 130, 3 },
-	{ DBGBUS_DSPP, 130, 4 },
-	{ DBGBUS_DSPP, 130, 5 },
-
-	/* qseed */
-	{ DBGBUS_SSPP0, 6, 0},
-	{ DBGBUS_SSPP0, 6, 1},
-	{ DBGBUS_SSPP0, 26, 0},
-	{ DBGBUS_SSPP0, 26, 1},
-	{ DBGBUS_SSPP1, 6, 0},
-	{ DBGBUS_SSPP1, 6, 1},
-	{ DBGBUS_SSPP1, 26, 0},
-	{ DBGBUS_SSPP1, 26, 1},
-
-	/* scale */
-	{ DBGBUS_SSPP0, 16, 0},
-	{ DBGBUS_SSPP0, 16, 1},
-	{ DBGBUS_SSPP0, 36, 0},
-	{ DBGBUS_SSPP0, 36, 1},
-	{ DBGBUS_SSPP1, 16, 0},
-	{ DBGBUS_SSPP1, 16, 1},
-	{ DBGBUS_SSPP1, 36, 0},
-	{ DBGBUS_SSPP1, 36, 1},
-
-	/* fetch sspp0 */
-
-	/* vig 0 */
-	{ DBGBUS_SSPP0, 0, 0 },
-	{ DBGBUS_SSPP0, 0, 1 },
-	{ DBGBUS_SSPP0, 0, 2 },
-	{ DBGBUS_SSPP0, 0, 3 },
-	{ DBGBUS_SSPP0, 0, 4 },
-	{ DBGBUS_SSPP0, 0, 5 },
-	{ DBGBUS_SSPP0, 0, 6 },
-	{ DBGBUS_SSPP0, 0, 7 },
-
-	{ DBGBUS_SSPP0, 1, 0 },
-	{ DBGBUS_SSPP0, 1, 1 },
-	{ DBGBUS_SSPP0, 1, 2 },
-	{ DBGBUS_SSPP0, 1, 3 },
-	{ DBGBUS_SSPP0, 1, 4 },
-	{ DBGBUS_SSPP0, 1, 5 },
-	{ DBGBUS_SSPP0, 1, 6 },
-	{ DBGBUS_SSPP0, 1, 7 },
-
-	{ DBGBUS_SSPP0, 2, 0 },
-	{ DBGBUS_SSPP0, 2, 1 },
-	{ DBGBUS_SSPP0, 2, 2 },
-	{ DBGBUS_SSPP0, 2, 3 },
-	{ DBGBUS_SSPP0, 2, 4 },
-	{ DBGBUS_SSPP0, 2, 5 },
-	{ DBGBUS_SSPP0, 2, 6 },
-	{ DBGBUS_SSPP0, 2, 7 },
-
-	{ DBGBUS_SSPP0, 4, 0 },
-	{ DBGBUS_SSPP0, 4, 1 },
-	{ DBGBUS_SSPP0, 4, 2 },
-	{ DBGBUS_SSPP0, 4, 3 },
-	{ DBGBUS_SSPP0, 4, 4 },
-	{ DBGBUS_SSPP0, 4, 5 },
-	{ DBGBUS_SSPP0, 4, 6 },
-	{ DBGBUS_SSPP0, 4, 7 },
-
-	{ DBGBUS_SSPP0, 5, 0 },
-	{ DBGBUS_SSPP0, 5, 1 },
-	{ DBGBUS_SSPP0, 5, 2 },
-	{ DBGBUS_SSPP0, 5, 3 },
-	{ DBGBUS_SSPP0, 5, 4 },
-	{ DBGBUS_SSPP0, 5, 5 },
-	{ DBGBUS_SSPP0, 5, 6 },
-	{ DBGBUS_SSPP0, 5, 7 },
-
-	/* vig 2 */
-	{ DBGBUS_SSPP0, 20, 0 },
-	{ DBGBUS_SSPP0, 20, 1 },
-	{ DBGBUS_SSPP0, 20, 2 },
-	{ DBGBUS_SSPP0, 20, 3 },
-	{ DBGBUS_SSPP0, 20, 4 },
-	{ DBGBUS_SSPP0, 20, 5 },
-	{ DBGBUS_SSPP0, 20, 6 },
-	{ DBGBUS_SSPP0, 20, 7 },
-
-	{ DBGBUS_SSPP0, 21, 0 },
-	{ DBGBUS_SSPP0, 21, 1 },
-	{ DBGBUS_SSPP0, 21, 2 },
-	{ DBGBUS_SSPP0, 21, 3 },
-	{ DBGBUS_SSPP0, 21, 4 },
-	{ DBGBUS_SSPP0, 21, 5 },
-	{ DBGBUS_SSPP0, 21, 6 },
-	{ DBGBUS_SSPP0, 21, 7 },
-
-	{ DBGBUS_SSPP0, 22, 0 },
-	{ DBGBUS_SSPP0, 22, 1 },
-	{ DBGBUS_SSPP0, 22, 2 },
-	{ DBGBUS_SSPP0, 22, 3 },
-	{ DBGBUS_SSPP0, 22, 4 },
-	{ DBGBUS_SSPP0, 22, 5 },
-	{ DBGBUS_SSPP0, 22, 6 },
-	{ DBGBUS_SSPP0, 22, 7 },
-
-	{ DBGBUS_SSPP0, 24, 0 },
-	{ DBGBUS_SSPP0, 24, 1 },
-	{ DBGBUS_SSPP0, 24, 2 },
-	{ DBGBUS_SSPP0, 24, 3 },
-	{ DBGBUS_SSPP0, 24, 4 },
-	{ DBGBUS_SSPP0, 24, 5 },
-	{ DBGBUS_SSPP0, 24, 6 },
-	{ DBGBUS_SSPP0, 24, 7 },
-
-	{ DBGBUS_SSPP0, 25, 0 },
-	{ DBGBUS_SSPP0, 25, 1 },
-	{ DBGBUS_SSPP0, 25, 2 },
-	{ DBGBUS_SSPP0, 25, 3 },
-	{ DBGBUS_SSPP0, 25, 4 },
-	{ DBGBUS_SSPP0, 25, 5 },
-	{ DBGBUS_SSPP0, 25, 6 },
-	{ DBGBUS_SSPP0, 25, 7 },
-
-	/* dma 2 */
-	{ DBGBUS_SSPP0, 30, 0 },
-	{ DBGBUS_SSPP0, 30, 1 },
-	{ DBGBUS_SSPP0, 30, 2 },
-	{ DBGBUS_SSPP0, 30, 3 },
-	{ DBGBUS_SSPP0, 30, 4 },
-	{ DBGBUS_SSPP0, 30, 5 },
-	{ DBGBUS_SSPP0, 30, 6 },
-	{ DBGBUS_SSPP0, 30, 7 },
-
-	{ DBGBUS_SSPP0, 31, 0 },
-	{ DBGBUS_SSPP0, 31, 1 },
-	{ DBGBUS_SSPP0, 31, 2 },
-	{ DBGBUS_SSPP0, 31, 3 },
-	{ DBGBUS_SSPP0, 31, 4 },
-	{ DBGBUS_SSPP0, 31, 5 },
-	{ DBGBUS_SSPP0, 31, 6 },
-	{ DBGBUS_SSPP0, 31, 7 },
-
-	{ DBGBUS_SSPP0, 32, 0 },
-	{ DBGBUS_SSPP0, 32, 1 },
-	{ DBGBUS_SSPP0, 32, 2 },
-	{ DBGBUS_SSPP0, 32, 3 },
-	{ DBGBUS_SSPP0, 32, 4 },
-	{ DBGBUS_SSPP0, 32, 5 },
-	{ DBGBUS_SSPP0, 32, 6 },
-	{ DBGBUS_SSPP0, 32, 7 },
-
-	{ DBGBUS_SSPP0, 33, 0 },
-	{ DBGBUS_SSPP0, 33, 1 },
-	{ DBGBUS_SSPP0, 33, 2 },
-	{ DBGBUS_SSPP0, 33, 3 },
-	{ DBGBUS_SSPP0, 33, 4 },
-	{ DBGBUS_SSPP0, 33, 5 },
-	{ DBGBUS_SSPP0, 33, 6 },
-	{ DBGBUS_SSPP0, 33, 7 },
-
-	{ DBGBUS_SSPP0, 34, 0 },
-	{ DBGBUS_SSPP0, 34, 1 },
-	{ DBGBUS_SSPP0, 34, 2 },
-	{ DBGBUS_SSPP0, 34, 3 },
-	{ DBGBUS_SSPP0, 34, 4 },
-	{ DBGBUS_SSPP0, 34, 5 },
-	{ DBGBUS_SSPP0, 34, 6 },
-	{ DBGBUS_SSPP0, 34, 7 },
-
-	{ DBGBUS_SSPP0, 35, 0 },
-	{ DBGBUS_SSPP0, 35, 1 },
-	{ DBGBUS_SSPP0, 35, 2 },
-	{ DBGBUS_SSPP0, 35, 3 },
-
-	/* dma 0 */
-	{ DBGBUS_SSPP0, 40, 0 },
-	{ DBGBUS_SSPP0, 40, 1 },
-	{ DBGBUS_SSPP0, 40, 2 },
-	{ DBGBUS_SSPP0, 40, 3 },
-	{ DBGBUS_SSPP0, 40, 4 },
-	{ DBGBUS_SSPP0, 40, 5 },
-	{ DBGBUS_SSPP0, 40, 6 },
-	{ DBGBUS_SSPP0, 40, 7 },
-
-	{ DBGBUS_SSPP0, 41, 0 },
-	{ DBGBUS_SSPP0, 41, 1 },
-	{ DBGBUS_SSPP0, 41, 2 },
-	{ DBGBUS_SSPP0, 41, 3 },
-	{ DBGBUS_SSPP0, 41, 4 },
-	{ DBGBUS_SSPP0, 41, 5 },
-	{ DBGBUS_SSPP0, 41, 6 },
-	{ DBGBUS_SSPP0, 41, 7 },
-
-	{ DBGBUS_SSPP0, 42, 0 },
-	{ DBGBUS_SSPP0, 42, 1 },
-	{ DBGBUS_SSPP0, 42, 2 },
-	{ DBGBUS_SSPP0, 42, 3 },
-	{ DBGBUS_SSPP0, 42, 4 },
-	{ DBGBUS_SSPP0, 42, 5 },
-	{ DBGBUS_SSPP0, 42, 6 },
-	{ DBGBUS_SSPP0, 42, 7 },
-
-	{ DBGBUS_SSPP0, 44, 0 },
-	{ DBGBUS_SSPP0, 44, 1 },
-	{ DBGBUS_SSPP0, 44, 2 },
-	{ DBGBUS_SSPP0, 44, 3 },
-	{ DBGBUS_SSPP0, 44, 4 },
-	{ DBGBUS_SSPP0, 44, 5 },
-	{ DBGBUS_SSPP0, 44, 6 },
-	{ DBGBUS_SSPP0, 44, 7 },
-
-	{ DBGBUS_SSPP0, 45, 0 },
-	{ DBGBUS_SSPP0, 45, 1 },
-	{ DBGBUS_SSPP0, 45, 2 },
-	{ DBGBUS_SSPP0, 45, 3 },
-	{ DBGBUS_SSPP0, 45, 4 },
-	{ DBGBUS_SSPP0, 45, 5 },
-	{ DBGBUS_SSPP0, 45, 6 },
-	{ DBGBUS_SSPP0, 45, 7 },
-
-	/* fetch sspp1 */
-	/* vig 1 */
-	{ DBGBUS_SSPP1, 0, 0 },
-	{ DBGBUS_SSPP1, 0, 1 },
-	{ DBGBUS_SSPP1, 0, 2 },
-	{ DBGBUS_SSPP1, 0, 3 },
-	{ DBGBUS_SSPP1, 0, 4 },
-	{ DBGBUS_SSPP1, 0, 5 },
-	{ DBGBUS_SSPP1, 0, 6 },
-	{ DBGBUS_SSPP1, 0, 7 },
-
-	{ DBGBUS_SSPP1, 1, 0 },
-	{ DBGBUS_SSPP1, 1, 1 },
-	{ DBGBUS_SSPP1, 1, 2 },
-	{ DBGBUS_SSPP1, 1, 3 },
-	{ DBGBUS_SSPP1, 1, 4 },
-	{ DBGBUS_SSPP1, 1, 5 },
-	{ DBGBUS_SSPP1, 1, 6 },
-	{ DBGBUS_SSPP1, 1, 7 },
-
-	{ DBGBUS_SSPP1, 2, 0 },
-	{ DBGBUS_SSPP1, 2, 1 },
-	{ DBGBUS_SSPP1, 2, 2 },
-	{ DBGBUS_SSPP1, 2, 3 },
-	{ DBGBUS_SSPP1, 2, 4 },
-	{ DBGBUS_SSPP1, 2, 5 },
-	{ DBGBUS_SSPP1, 2, 6 },
-	{ DBGBUS_SSPP1, 2, 7 },
-
-	{ DBGBUS_SSPP1, 4, 0 },
-	{ DBGBUS_SSPP1, 4, 1 },
-	{ DBGBUS_SSPP1, 4, 2 },
-	{ DBGBUS_SSPP1, 4, 3 },
-	{ DBGBUS_SSPP1, 4, 4 },
-	{ DBGBUS_SSPP1, 4, 5 },
-	{ DBGBUS_SSPP1, 4, 6 },
-	{ DBGBUS_SSPP1, 4, 7 },
-
-	{ DBGBUS_SSPP1, 5, 0 },
-	{ DBGBUS_SSPP1, 5, 1 },
-	{ DBGBUS_SSPP1, 5, 2 },
-	{ DBGBUS_SSPP1, 5, 3 },
-	{ DBGBUS_SSPP1, 5, 4 },
-	{ DBGBUS_SSPP1, 5, 5 },
-	{ DBGBUS_SSPP1, 5, 6 },
-	{ DBGBUS_SSPP1, 5, 7 },
-
-	/* vig 3 */
-	{ DBGBUS_SSPP1, 20, 0 },
-	{ DBGBUS_SSPP1, 20, 1 },
-	{ DBGBUS_SSPP1, 20, 2 },
-	{ DBGBUS_SSPP1, 20, 3 },
-	{ DBGBUS_SSPP1, 20, 4 },
-	{ DBGBUS_SSPP1, 20, 5 },
-	{ DBGBUS_SSPP1, 20, 6 },
-	{ DBGBUS_SSPP1, 20, 7 },
-
-	{ DBGBUS_SSPP1, 21, 0 },
-	{ DBGBUS_SSPP1, 21, 1 },
-	{ DBGBUS_SSPP1, 21, 2 },
-	{ DBGBUS_SSPP1, 21, 3 },
-	{ DBGBUS_SSPP1, 21, 4 },
-	{ DBGBUS_SSPP1, 21, 5 },
-	{ DBGBUS_SSPP1, 21, 6 },
-	{ DBGBUS_SSPP1, 21, 7 },
-
-	{ DBGBUS_SSPP1, 22, 0 },
-	{ DBGBUS_SSPP1, 22, 1 },
-	{ DBGBUS_SSPP1, 22, 2 },
-	{ DBGBUS_SSPP1, 22, 3 },
-	{ DBGBUS_SSPP1, 22, 4 },
-	{ DBGBUS_SSPP1, 22, 5 },
-	{ DBGBUS_SSPP1, 22, 6 },
-	{ DBGBUS_SSPP1, 22, 7 },
-
-	{ DBGBUS_SSPP1, 24, 0 },
-	{ DBGBUS_SSPP1, 24, 1 },
-	{ DBGBUS_SSPP1, 24, 2 },
-	{ DBGBUS_SSPP1, 24, 3 },
-	{ DBGBUS_SSPP1, 24, 4 },
-	{ DBGBUS_SSPP1, 24, 5 },
-	{ DBGBUS_SSPP1, 24, 6 },
-	{ DBGBUS_SSPP1, 24, 7 },
-
-	{ DBGBUS_SSPP1, 25, 0 },
-	{ DBGBUS_SSPP1, 25, 1 },
-	{ DBGBUS_SSPP1, 25, 2 },
-	{ DBGBUS_SSPP1, 25, 3 },
-	{ DBGBUS_SSPP1, 25, 4 },
-	{ DBGBUS_SSPP1, 25, 5 },
-	{ DBGBUS_SSPP1, 25, 6 },
-	{ DBGBUS_SSPP1, 25, 7 },
-
-	/* dma 3 */
-	{ DBGBUS_SSPP1, 30, 0 },
-	{ DBGBUS_SSPP1, 30, 1 },
-	{ DBGBUS_SSPP1, 30, 2 },
-	{ DBGBUS_SSPP1, 30, 3 },
-	{ DBGBUS_SSPP1, 30, 4 },
-	{ DBGBUS_SSPP1, 30, 5 },
-	{ DBGBUS_SSPP1, 30, 6 },
-	{ DBGBUS_SSPP1, 30, 7 },
-
-	{ DBGBUS_SSPP1, 31, 0 },
-	{ DBGBUS_SSPP1, 31, 1 },
-	{ DBGBUS_SSPP1, 31, 2 },
-	{ DBGBUS_SSPP1, 31, 3 },
-	{ DBGBUS_SSPP1, 31, 4 },
-	{ DBGBUS_SSPP1, 31, 5 },
-	{ DBGBUS_SSPP1, 31, 6 },
-	{ DBGBUS_SSPP1, 31, 7 },
-
-	{ DBGBUS_SSPP1, 32, 0 },
-	{ DBGBUS_SSPP1, 32, 1 },
-	{ DBGBUS_SSPP1, 32, 2 },
-	{ DBGBUS_SSPP1, 32, 3 },
-	{ DBGBUS_SSPP1, 32, 4 },
-	{ DBGBUS_SSPP1, 32, 5 },
-	{ DBGBUS_SSPP1, 32, 6 },
-	{ DBGBUS_SSPP1, 32, 7 },
-
-	{ DBGBUS_SSPP1, 33, 0 },
-	{ DBGBUS_SSPP1, 33, 1 },
-	{ DBGBUS_SSPP1, 33, 2 },
-	{ DBGBUS_SSPP1, 33, 3 },
-	{ DBGBUS_SSPP1, 33, 4 },
-	{ DBGBUS_SSPP1, 33, 5 },
-	{ DBGBUS_SSPP1, 33, 6 },
-	{ DBGBUS_SSPP1, 33, 7 },
-
-	{ DBGBUS_SSPP1, 34, 0 },
-	{ DBGBUS_SSPP1, 34, 1 },
-	{ DBGBUS_SSPP1, 34, 2 },
-	{ DBGBUS_SSPP1, 34, 3 },
-	{ DBGBUS_SSPP1, 34, 4 },
-	{ DBGBUS_SSPP1, 34, 5 },
-	{ DBGBUS_SSPP1, 34, 6 },
-	{ DBGBUS_SSPP1, 34, 7 },
-
-	{ DBGBUS_SSPP1, 35, 0 },
-	{ DBGBUS_SSPP1, 35, 1 },
-	{ DBGBUS_SSPP1, 35, 2 },
-
-	/* dma 1 */
-	{ DBGBUS_SSPP1, 40, 0 },
-	{ DBGBUS_SSPP1, 40, 1 },
-	{ DBGBUS_SSPP1, 40, 2 },
-	{ DBGBUS_SSPP1, 40, 3 },
-	{ DBGBUS_SSPP1, 40, 4 },
-	{ DBGBUS_SSPP1, 40, 5 },
-	{ DBGBUS_SSPP1, 40, 6 },
-	{ DBGBUS_SSPP1, 40, 7 },
-
-	{ DBGBUS_SSPP1, 41, 0 },
-	{ DBGBUS_SSPP1, 41, 1 },
-	{ DBGBUS_SSPP1, 41, 2 },
-	{ DBGBUS_SSPP1, 41, 3 },
-	{ DBGBUS_SSPP1, 41, 4 },
-	{ DBGBUS_SSPP1, 41, 5 },
-	{ DBGBUS_SSPP1, 41, 6 },
-	{ DBGBUS_SSPP1, 41, 7 },
-
-	{ DBGBUS_SSPP1, 42, 0 },
-	{ DBGBUS_SSPP1, 42, 1 },
-	{ DBGBUS_SSPP1, 42, 2 },
-	{ DBGBUS_SSPP1, 42, 3 },
-	{ DBGBUS_SSPP1, 42, 4 },
-	{ DBGBUS_SSPP1, 42, 5 },
-	{ DBGBUS_SSPP1, 42, 6 },
-	{ DBGBUS_SSPP1, 42, 7 },
-
-	{ DBGBUS_SSPP1, 44, 0 },
-	{ DBGBUS_SSPP1, 44, 1 },
-	{ DBGBUS_SSPP1, 44, 2 },
-	{ DBGBUS_SSPP1, 44, 3 },
-	{ DBGBUS_SSPP1, 44, 4 },
-	{ DBGBUS_SSPP1, 44, 5 },
-	{ DBGBUS_SSPP1, 44, 6 },
-	{ DBGBUS_SSPP1, 44, 7 },
-
-	{ DBGBUS_SSPP1, 45, 0 },
-	{ DBGBUS_SSPP1, 45, 1 },
-	{ DBGBUS_SSPP1, 45, 2 },
-	{ DBGBUS_SSPP1, 45, 3 },
-	{ DBGBUS_SSPP1, 45, 4 },
-	{ DBGBUS_SSPP1, 45, 5 },
-	{ DBGBUS_SSPP1, 45, 6 },
-	{ DBGBUS_SSPP1, 45, 7 },
-
-	/* dspp */
-	{ DBGBUS_DSPP, 13, 0 },
-	{ DBGBUS_DSPP, 19, 0 },
-	{ DBGBUS_DSPP, 14, 0 },
-	{ DBGBUS_DSPP, 14, 1 },
-	{ DBGBUS_DSPP, 14, 3 },
-	{ DBGBUS_DSPP, 20, 0 },
-	{ DBGBUS_DSPP, 20, 1 },
-	{ DBGBUS_DSPP, 20, 3 },
-
-	/* ppb_0 */
-	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
-	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
-	/* ppb_1 */
-	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
-	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
-	/* lm_lut */
-	{ DBGBUS_DSPP, 109, 0 },
-	{ DBGBUS_DSPP, 105, 0 },
-	{ DBGBUS_DSPP, 103, 0 },
-
-	/* crossbar */
-	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
-	/* rotator */
-	{ DBGBUS_DSPP, 9, 0},
-
-	/* blend */
-	/* LM0 */
-	{ DBGBUS_DSPP, 63, 1},
-	{ DBGBUS_DSPP, 63, 2},
-	{ DBGBUS_DSPP, 63, 3},
-	{ DBGBUS_DSPP, 63, 4},
-	{ DBGBUS_DSPP, 63, 5},
-	{ DBGBUS_DSPP, 63, 6},
-	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 64, 1},
-	{ DBGBUS_DSPP, 64, 2},
-	{ DBGBUS_DSPP, 64, 3},
-	{ DBGBUS_DSPP, 64, 4},
-	{ DBGBUS_DSPP, 64, 5},
-	{ DBGBUS_DSPP, 64, 6},
-	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 65, 1},
-	{ DBGBUS_DSPP, 65, 2},
-	{ DBGBUS_DSPP, 65, 3},
-	{ DBGBUS_DSPP, 65, 4},
-	{ DBGBUS_DSPP, 65, 5},
-	{ DBGBUS_DSPP, 65, 6},
-	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 66, 1},
-	{ DBGBUS_DSPP, 66, 2},
-	{ DBGBUS_DSPP, 66, 3},
-	{ DBGBUS_DSPP, 66, 4},
-	{ DBGBUS_DSPP, 66, 5},
-	{ DBGBUS_DSPP, 66, 6},
-	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 67, 1},
-	{ DBGBUS_DSPP, 67, 2},
-	{ DBGBUS_DSPP, 67, 3},
-	{ DBGBUS_DSPP, 67, 4},
-	{ DBGBUS_DSPP, 67, 5},
-	{ DBGBUS_DSPP, 67, 6},
-	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 68, 1},
-	{ DBGBUS_DSPP, 68, 2},
-	{ DBGBUS_DSPP, 68, 3},
-	{ DBGBUS_DSPP, 68, 4},
-	{ DBGBUS_DSPP, 68, 5},
-	{ DBGBUS_DSPP, 68, 6},
-	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 69, 1},
-	{ DBGBUS_DSPP, 69, 2},
-	{ DBGBUS_DSPP, 69, 3},
-	{ DBGBUS_DSPP, 69, 4},
-	{ DBGBUS_DSPP, 69, 5},
-	{ DBGBUS_DSPP, 69, 6},
-	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 84, 1},
-	{ DBGBUS_DSPP, 84, 2},
-	{ DBGBUS_DSPP, 84, 3},
-	{ DBGBUS_DSPP, 84, 4},
-	{ DBGBUS_DSPP, 84, 5},
-	{ DBGBUS_DSPP, 84, 6},
-	{ DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 85, 1},
-	{ DBGBUS_DSPP, 85, 2},
-	{ DBGBUS_DSPP, 85, 3},
-	{ DBGBUS_DSPP, 85, 4},
-	{ DBGBUS_DSPP, 85, 5},
-	{ DBGBUS_DSPP, 85, 6},
-	{ DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 86, 1},
-	{ DBGBUS_DSPP, 86, 2},
-	{ DBGBUS_DSPP, 86, 3},
-	{ DBGBUS_DSPP, 86, 4},
-	{ DBGBUS_DSPP, 86, 5},
-	{ DBGBUS_DSPP, 86, 6},
-	{ DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
-
-	{ DBGBUS_DSPP, 87, 1},
-	{ DBGBUS_DSPP, 87, 2},
-	{ DBGBUS_DSPP, 87, 3},
-	{ DBGBUS_DSPP, 87, 4},
-	{ DBGBUS_DSPP, 87, 5},
-	{ DBGBUS_DSPP, 87, 6},
-	{ DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
-	/* LM1 */
-	{ DBGBUS_DSPP, 70, 1},
-	{ DBGBUS_DSPP, 70, 2},
-	{ DBGBUS_DSPP, 70, 3},
-	{ DBGBUS_DSPP, 70, 4},
-	{ DBGBUS_DSPP, 70, 5},
-	{ DBGBUS_DSPP, 70, 6},
-	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 71, 1},
-	{ DBGBUS_DSPP, 71, 2},
-	{ DBGBUS_DSPP, 71, 3},
-	{ DBGBUS_DSPP, 71, 4},
-	{ DBGBUS_DSPP, 71, 5},
-	{ DBGBUS_DSPP, 71, 6},
-	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 72, 1},
-	{ DBGBUS_DSPP, 72, 2},
-	{ DBGBUS_DSPP, 72, 3},
-	{ DBGBUS_DSPP, 72, 4},
-	{ DBGBUS_DSPP, 72, 5},
-	{ DBGBUS_DSPP, 72, 6},
-	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 73, 1},
-	{ DBGBUS_DSPP, 73, 2},
-	{ DBGBUS_DSPP, 73, 3},
-	{ DBGBUS_DSPP, 73, 4},
-	{ DBGBUS_DSPP, 73, 5},
-	{ DBGBUS_DSPP, 73, 6},
-	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 74, 1},
-	{ DBGBUS_DSPP, 74, 2},
-	{ DBGBUS_DSPP, 74, 3},
-	{ DBGBUS_DSPP, 74, 4},
-	{ DBGBUS_DSPP, 74, 5},
-	{ DBGBUS_DSPP, 74, 6},
-	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 75, 1},
-	{ DBGBUS_DSPP, 75, 2},
-	{ DBGBUS_DSPP, 75, 3},
-	{ DBGBUS_DSPP, 75, 4},
-	{ DBGBUS_DSPP, 75, 5},
-	{ DBGBUS_DSPP, 75, 6},
-	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 76, 1},
-	{ DBGBUS_DSPP, 76, 2},
-	{ DBGBUS_DSPP, 76, 3},
-	{ DBGBUS_DSPP, 76, 4},
-	{ DBGBUS_DSPP, 76, 5},
-	{ DBGBUS_DSPP, 76, 6},
-	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 88, 1},
-	{ DBGBUS_DSPP, 88, 2},
-	{ DBGBUS_DSPP, 88, 3},
-	{ DBGBUS_DSPP, 88, 4},
-	{ DBGBUS_DSPP, 88, 5},
-	{ DBGBUS_DSPP, 88, 6},
-	{ DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 89, 1},
-	{ DBGBUS_DSPP, 89, 2},
-	{ DBGBUS_DSPP, 89, 3},
-	{ DBGBUS_DSPP, 89, 4},
-	{ DBGBUS_DSPP, 89, 5},
-	{ DBGBUS_DSPP, 89, 6},
-	{ DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 90, 1},
-	{ DBGBUS_DSPP, 90, 2},
-	{ DBGBUS_DSPP, 90, 3},
-	{ DBGBUS_DSPP, 90, 4},
-	{ DBGBUS_DSPP, 90, 5},
-	{ DBGBUS_DSPP, 90, 6},
-	{ DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 91, 1},
-	{ DBGBUS_DSPP, 91, 2},
-	{ DBGBUS_DSPP, 91, 3},
-	{ DBGBUS_DSPP, 91, 4},
-	{ DBGBUS_DSPP, 91, 5},
-	{ DBGBUS_DSPP, 91, 6},
-	{ DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
-	/* LM2 */
-	{ DBGBUS_DSPP, 77, 0},
-	{ DBGBUS_DSPP, 77, 1},
-	{ DBGBUS_DSPP, 77, 2},
-	{ DBGBUS_DSPP, 77, 3},
-	{ DBGBUS_DSPP, 77, 4},
-	{ DBGBUS_DSPP, 77, 5},
-	{ DBGBUS_DSPP, 77, 6},
-	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 78, 0},
-	{ DBGBUS_DSPP, 78, 1},
-	{ DBGBUS_DSPP, 78, 2},
-	{ DBGBUS_DSPP, 78, 3},
-	{ DBGBUS_DSPP, 78, 4},
-	{ DBGBUS_DSPP, 78, 5},
-	{ DBGBUS_DSPP, 78, 6},
-	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 79, 0},
-	{ DBGBUS_DSPP, 79, 1},
-	{ DBGBUS_DSPP, 79, 2},
-	{ DBGBUS_DSPP, 79, 3},
-	{ DBGBUS_DSPP, 79, 4},
-	{ DBGBUS_DSPP, 79, 5},
-	{ DBGBUS_DSPP, 79, 6},
-	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 80, 0},
-	{ DBGBUS_DSPP, 80, 1},
-	{ DBGBUS_DSPP, 80, 2},
-	{ DBGBUS_DSPP, 80, 3},
-	{ DBGBUS_DSPP, 80, 4},
-	{ DBGBUS_DSPP, 80, 5},
-	{ DBGBUS_DSPP, 80, 6},
-	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 81, 0},
-	{ DBGBUS_DSPP, 81, 1},
-	{ DBGBUS_DSPP, 81, 2},
-	{ DBGBUS_DSPP, 81, 3},
-	{ DBGBUS_DSPP, 81, 4},
-	{ DBGBUS_DSPP, 81, 5},
-	{ DBGBUS_DSPP, 81, 6},
-	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 82, 0},
-	{ DBGBUS_DSPP, 82, 1},
-	{ DBGBUS_DSPP, 82, 2},
-	{ DBGBUS_DSPP, 82, 3},
-	{ DBGBUS_DSPP, 82, 4},
-	{ DBGBUS_DSPP, 82, 5},
-	{ DBGBUS_DSPP, 82, 6},
-	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 83, 0},
-	{ DBGBUS_DSPP, 83, 1},
-	{ DBGBUS_DSPP, 83, 2},
-	{ DBGBUS_DSPP, 83, 3},
-	{ DBGBUS_DSPP, 83, 4},
-	{ DBGBUS_DSPP, 83, 5},
-	{ DBGBUS_DSPP, 83, 6},
-	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 92, 1},
-	{ DBGBUS_DSPP, 92, 2},
-	{ DBGBUS_DSPP, 92, 3},
-	{ DBGBUS_DSPP, 92, 4},
-	{ DBGBUS_DSPP, 92, 5},
-	{ DBGBUS_DSPP, 92, 6},
-	{ DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 93, 1},
-	{ DBGBUS_DSPP, 93, 2},
-	{ DBGBUS_DSPP, 93, 3},
-	{ DBGBUS_DSPP, 93, 4},
-	{ DBGBUS_DSPP, 93, 5},
-	{ DBGBUS_DSPP, 93, 6},
-	{ DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 94, 1},
-	{ DBGBUS_DSPP, 94, 2},
-	{ DBGBUS_DSPP, 94, 3},
-	{ DBGBUS_DSPP, 94, 4},
-	{ DBGBUS_DSPP, 94, 5},
-	{ DBGBUS_DSPP, 94, 6},
-	{ DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 95, 1},
-	{ DBGBUS_DSPP, 95, 2},
-	{ DBGBUS_DSPP, 95, 3},
-	{ DBGBUS_DSPP, 95, 4},
-	{ DBGBUS_DSPP, 95, 5},
-	{ DBGBUS_DSPP, 95, 6},
-	{ DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
-	/* LM5 */
-	{ DBGBUS_DSPP, 110, 1},
-	{ DBGBUS_DSPP, 110, 2},
-	{ DBGBUS_DSPP, 110, 3},
-	{ DBGBUS_DSPP, 110, 4},
-	{ DBGBUS_DSPP, 110, 5},
-	{ DBGBUS_DSPP, 110, 6},
-	{ DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 111, 1},
-	{ DBGBUS_DSPP, 111, 2},
-	{ DBGBUS_DSPP, 111, 3},
-	{ DBGBUS_DSPP, 111, 4},
-	{ DBGBUS_DSPP, 111, 5},
-	{ DBGBUS_DSPP, 111, 6},
-	{ DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 112, 1},
-	{ DBGBUS_DSPP, 112, 2},
-	{ DBGBUS_DSPP, 112, 3},
-	{ DBGBUS_DSPP, 112, 4},
-	{ DBGBUS_DSPP, 112, 5},
-	{ DBGBUS_DSPP, 112, 6},
-	{ DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 113, 1},
-	{ DBGBUS_DSPP, 113, 2},
-	{ DBGBUS_DSPP, 113, 3},
-	{ DBGBUS_DSPP, 113, 4},
-	{ DBGBUS_DSPP, 113, 5},
-	{ DBGBUS_DSPP, 113, 6},
-	{ DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 114, 1},
-	{ DBGBUS_DSPP, 114, 2},
-	{ DBGBUS_DSPP, 114, 3},
-	{ DBGBUS_DSPP, 114, 4},
-	{ DBGBUS_DSPP, 114, 5},
-	{ DBGBUS_DSPP, 114, 6},
-	{ DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 115, 1},
-	{ DBGBUS_DSPP, 115, 2},
-	{ DBGBUS_DSPP, 115, 3},
-	{ DBGBUS_DSPP, 115, 4},
-	{ DBGBUS_DSPP, 115, 5},
-	{ DBGBUS_DSPP, 115, 6},
-	{ DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 116, 1},
-	{ DBGBUS_DSPP, 116, 2},
-	{ DBGBUS_DSPP, 116, 3},
-	{ DBGBUS_DSPP, 116, 4},
-	{ DBGBUS_DSPP, 116, 5},
-	{ DBGBUS_DSPP, 116, 6},
-	{ DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 117, 1},
-	{ DBGBUS_DSPP, 117, 2},
-	{ DBGBUS_DSPP, 117, 3},
-	{ DBGBUS_DSPP, 117, 4},
-	{ DBGBUS_DSPP, 117, 5},
-	{ DBGBUS_DSPP, 117, 6},
-	{ DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 118, 1},
-	{ DBGBUS_DSPP, 118, 2},
-	{ DBGBUS_DSPP, 118, 3},
-	{ DBGBUS_DSPP, 118, 4},
-	{ DBGBUS_DSPP, 118, 5},
-	{ DBGBUS_DSPP, 118, 6},
-	{ DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 119, 1},
-	{ DBGBUS_DSPP, 119, 2},
-	{ DBGBUS_DSPP, 119, 3},
-	{ DBGBUS_DSPP, 119, 4},
-	{ DBGBUS_DSPP, 119, 5},
-	{ DBGBUS_DSPP, 119, 6},
-	{ DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
-	{ DBGBUS_DSPP, 120, 1},
-	{ DBGBUS_DSPP, 120, 2},
-	{ DBGBUS_DSPP, 120, 3},
-	{ DBGBUS_DSPP, 120, 4},
-	{ DBGBUS_DSPP, 120, 5},
-	{ DBGBUS_DSPP, 120, 6},
-	{ DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
-	/* csc */
-	{ DBGBUS_SSPP0, 7, 0},
-	{ DBGBUS_SSPP0, 7, 1},
-	{ DBGBUS_SSPP0, 27, 0},
-	{ DBGBUS_SSPP0, 27, 1},
-	{ DBGBUS_SSPP1, 7, 0},
-	{ DBGBUS_SSPP1, 7, 1},
-	{ DBGBUS_SSPP1, 27, 0},
-	{ DBGBUS_SSPP1, 27, 1},
-
-	/* pcc */
-	{ DBGBUS_SSPP0, 3,  3},
-	{ DBGBUS_SSPP0, 23, 3},
-	{ DBGBUS_SSPP0, 33, 3},
-	{ DBGBUS_SSPP0, 43, 3},
-	{ DBGBUS_SSPP1, 3,  3},
-	{ DBGBUS_SSPP1, 23, 3},
-	{ DBGBUS_SSPP1, 33, 3},
-	{ DBGBUS_SSPP1, 43, 3},
-
-	/* spa */
-	{ DBGBUS_SSPP0, 8,  0},
-	{ DBGBUS_SSPP0, 28, 0},
-	{ DBGBUS_SSPP1, 8,  0},
-	{ DBGBUS_SSPP1, 28, 0},
-	{ DBGBUS_DSPP, 13, 0},
-	{ DBGBUS_DSPP, 19, 0},
-
-	/* igc */
-	{ DBGBUS_SSPP0, 17, 0},
-	{ DBGBUS_SSPP0, 17, 1},
-	{ DBGBUS_SSPP0, 17, 3},
-	{ DBGBUS_SSPP0, 37, 0},
-	{ DBGBUS_SSPP0, 37, 1},
-	{ DBGBUS_SSPP0, 37, 3},
-	{ DBGBUS_SSPP0, 46, 0},
-	{ DBGBUS_SSPP0, 46, 1},
-	{ DBGBUS_SSPP0, 46, 3},
-
-	{ DBGBUS_SSPP1, 17, 0},
-	{ DBGBUS_SSPP1, 17, 1},
-	{ DBGBUS_SSPP1, 17, 3},
-	{ DBGBUS_SSPP1, 37, 0},
-	{ DBGBUS_SSPP1, 37, 1},
-	{ DBGBUS_SSPP1, 37, 3},
-	{ DBGBUS_SSPP1, 46, 0},
-	{ DBGBUS_SSPP1, 46, 1},
-	{ DBGBUS_SSPP1, 46, 3},
-
-	{ DBGBUS_DSPP, 14, 0},
-	{ DBGBUS_DSPP, 14, 1},
-	{ DBGBUS_DSPP, 14, 3},
-	{ DBGBUS_DSPP, 20, 0},
-	{ DBGBUS_DSPP, 20, 1},
-	{ DBGBUS_DSPP, 20, 3},
-
-	/* intf0-3 */
-	{ DBGBUS_PERIPH, 0, 0},
-	{ DBGBUS_PERIPH, 1, 0},
-	{ DBGBUS_PERIPH, 2, 0},
-	{ DBGBUS_PERIPH, 3, 0},
-
-	/* te counter wrapper */
-	{ DBGBUS_PERIPH, 60, 0},
-
-	/* dsc0 */
-	{ DBGBUS_PERIPH, 47, 0},
-	{ DBGBUS_PERIPH, 47, 1},
-	{ DBGBUS_PERIPH, 47, 2},
-	{ DBGBUS_PERIPH, 47, 3},
-	{ DBGBUS_PERIPH, 47, 4},
-	{ DBGBUS_PERIPH, 47, 5},
-	{ DBGBUS_PERIPH, 47, 6},
-	{ DBGBUS_PERIPH, 47, 7},
-
-	/* dsc1 */
-	{ DBGBUS_PERIPH, 48, 0},
-	{ DBGBUS_PERIPH, 48, 1},
-	{ DBGBUS_PERIPH, 48, 2},
-	{ DBGBUS_PERIPH, 48, 3},
-	{ DBGBUS_PERIPH, 48, 4},
-	{ DBGBUS_PERIPH, 48, 5},
-	{ DBGBUS_PERIPH, 48, 6},
-	{ DBGBUS_PERIPH, 48, 7},
-
-	/* dsc2 */
-	{ DBGBUS_PERIPH, 51, 0},
-	{ DBGBUS_PERIPH, 51, 1},
-	{ DBGBUS_PERIPH, 51, 2},
-	{ DBGBUS_PERIPH, 51, 3},
-	{ DBGBUS_PERIPH, 51, 4},
-	{ DBGBUS_PERIPH, 51, 5},
-	{ DBGBUS_PERIPH, 51, 6},
-	{ DBGBUS_PERIPH, 51, 7},
-
-	/* dsc3 */
-	{ DBGBUS_PERIPH, 52, 0},
-	{ DBGBUS_PERIPH, 52, 1},
-	{ DBGBUS_PERIPH, 52, 2},
-	{ DBGBUS_PERIPH, 52, 3},
-	{ DBGBUS_PERIPH, 52, 4},
-	{ DBGBUS_PERIPH, 52, 5},
-	{ DBGBUS_PERIPH, 52, 6},
-	{ DBGBUS_PERIPH, 52, 7},
-
-	/* tear-check */
-	{ DBGBUS_PERIPH, 63, 0 },
-	{ DBGBUS_PERIPH, 64, 0 },
-	{ DBGBUS_PERIPH, 65, 0 },
-	{ DBGBUS_PERIPH, 73, 0 },
-	{ DBGBUS_PERIPH, 74, 0 },
-
-	/* cdwn */
-	{ DBGBUS_PERIPH, 80, 0},
-	{ DBGBUS_PERIPH, 80, 1},
-	{ DBGBUS_PERIPH, 80, 2},
-
-	{ DBGBUS_PERIPH, 81, 0},
-	{ DBGBUS_PERIPH, 81, 1},
-	{ DBGBUS_PERIPH, 81, 2},
-
-	{ DBGBUS_PERIPH, 82, 0},
-	{ DBGBUS_PERIPH, 82, 1},
-	{ DBGBUS_PERIPH, 82, 2},
-	{ DBGBUS_PERIPH, 82, 3},
-	{ DBGBUS_PERIPH, 82, 4},
-	{ DBGBUS_PERIPH, 82, 5},
-	{ DBGBUS_PERIPH, 82, 6},
-	{ DBGBUS_PERIPH, 82, 7},
-
-	/* hdmi */
-	{ DBGBUS_PERIPH, 68, 0},
-	{ DBGBUS_PERIPH, 68, 1},
-	{ DBGBUS_PERIPH, 68, 2},
-	{ DBGBUS_PERIPH, 68, 3},
-	{ DBGBUS_PERIPH, 68, 4},
-	{ DBGBUS_PERIPH, 68, 5},
-
-	/* edp */
-	{ DBGBUS_PERIPH, 69, 0},
-	{ DBGBUS_PERIPH, 69, 1},
-	{ DBGBUS_PERIPH, 69, 2},
-	{ DBGBUS_PERIPH, 69, 3},
-	{ DBGBUS_PERIPH, 69, 4},
-	{ DBGBUS_PERIPH, 69, 5},
-
-	/* dsi0 */
-	{ DBGBUS_PERIPH, 70, 0},
-	{ DBGBUS_PERIPH, 70, 1},
-	{ DBGBUS_PERIPH, 70, 2},
-	{ DBGBUS_PERIPH, 70, 3},
-	{ DBGBUS_PERIPH, 70, 4},
-	{ DBGBUS_PERIPH, 70, 5},
-
-	/* dsi1 */
-	{ DBGBUS_PERIPH, 71, 0},
-	{ DBGBUS_PERIPH, 71, 1},
-	{ DBGBUS_PERIPH, 71, 2},
-	{ DBGBUS_PERIPH, 71, 3},
-	{ DBGBUS_PERIPH, 71, 4},
-	{ DBGBUS_PERIPH, 71, 5},
-};
-
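-/*
- * Each entry below appears to be, in order: disable_bus_addr,
- * block_bus_addr, bit_offset, block_cnt, test_pnt_start and test_pnt_cnt,
- * i.e. the fields _dpu_dbg_dump_vbif_debug_bus_entry() reads when it
- * walks the blocks and test points.
- */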
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
-	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
-	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
-	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
-	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
-	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
-	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
-	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - toggle power via pm_runtime for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
-	if (enable)
-		pm_runtime_get_sync(dpu_dbg_base.dev);
-	else
-		pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
-	bool in_log, in_mem;
-	u32 **dump_mem = NULL;
-	u32 *dump_addr = NULL;
-	u32 status = 0;
-	struct dpu_debug_bus_entry *head;
-	phys_addr_t phys = 0;
-	int list_size;
-	int i;
-	u32 offset;
-	void __iomem *mem_base = NULL;
-	struct dpu_dbg_reg_base *reg_base;
-
-	if (!bus || !bus->cmn.entries_size)
-		return;
-
-	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
-			reg_base_head)
-		if (strlen(reg_base->name) &&
-			!strcmp(reg_base->name, bus->cmn.name))
-			mem_base = reg_base->base + bus->top_blk_off;
-
-	if (!mem_base) {
-		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-		return;
-	}
-
-	dump_mem = &bus->cmn.dumped_content;
-
-	/* each entry is dumped as four 4-byte words: wr_addr, block_id, test_id, status */
-	list_size = (bus->cmn.entries_size * 4 * 4);
-
-	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
-	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
-	if (!in_log && !in_mem)
-		return;
-
-	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
-			bus->cmn.name);
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-				list_size, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			dev_info(dpu_dbg_base.dev,
-				"%s: start_addr:0x%pK len:0x%x\n",
-				__func__, dump_addr, list_size);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	_dpu_dbg_enable_power(true);
-	for (i = 0; i < bus->cmn.entries_size; i++) {
-		head = bus->entries + i;
-		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
-				mem_base + head->wr_addr);
-		wmb(); /* make sure test bits were written */
-
-		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
-			offset = DBGBUS_DSPP_STATUS;
-			/* keep DSPP test point enabled */
-			if (head->wr_addr != DBGBUS_DSPP)
-				writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
-		} else {
-			offset = head->wr_addr + 0x4;
-		}
-
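-		/* read the selected test point back through its status register */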
-		status = readl_relaxed(mem_base + offset);
-
-		if (in_log)
-			dev_info(dpu_dbg_base.dev,
-					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
-					head->wr_addr, head->block_id,
-					head->test_id, status);
-
-		if (dump_addr && in_mem) {
-			dump_addr[i*4]     = head->wr_addr;
-			dump_addr[i*4 + 1] = head->block_id;
-			dump_addr[i*4 + 2] = head->test_id;
-			dump_addr[i*4 + 3] = status;
-		}
-
-		if (head->analyzer)
-			head->analyzer(mem_base, head, status);
-
-		/* Disable debug bus once we are done */
-		writel_relaxed(0, mem_base + head->wr_addr);
-		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
-						head->wr_addr != DBGBUS_DSPP)
-			writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
-	}
-	_dpu_dbg_enable_power(false);
-
-	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
-			bus->cmn.name);
-}
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
-		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
-		u32 *dump_addr, bool in_log)
-{
-	int i, j;
-	u32 val;
-
-	if (!dump_addr && !in_log)
-		return;
-
-	for (i = 0; i < head->block_cnt; i++) {
-		writel_relaxed(1 << (i + head->bit_offset),
-				mem_base + head->block_bus_addr);
-		/* make sure that the current bus block is enabled */
-		wmb();
-		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
-			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
-			/* make sure that test point is enabled */
-			wmb();
-			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
-			if (dump_addr) {
-				*dump_addr++ = head->block_bus_addr;
-				*dump_addr++ = i;
-				*dump_addr++ = j;
-				*dump_addr++ = val;
-			}
-			if (in_log)
-				dev_info(dpu_dbg_base.dev,
-					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
-					head->block_bus_addr, i, j, val);
-		}
-	}
-}
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
-	bool in_log, in_mem;
-	u32 **dump_mem = NULL;
-	u32 *dump_addr = NULL;
-	u32 value, d0, d1;
-	unsigned long reg, reg1, reg2;
-	struct vbif_debug_bus_entry *head;
-	phys_addr_t phys = 0;
-	int i, list_size = 0;
-	void __iomem *mem_base = NULL;
-	struct vbif_debug_bus_entry *dbg_bus;
-	u32 bus_size;
-	struct dpu_dbg_reg_base *reg_base;
-
-	if (!bus || !bus->cmn.entries_size)
-		return;
-
-	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
-			reg_base_head)
-		if (strlen(reg_base->name) &&
-			!strcmp(reg_base->name, bus->cmn.name))
-			mem_base = reg_base->base;
-
-	if (!mem_base) {
-		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-		return;
-	}
-
-	dbg_bus = bus->entries;
-	bus_size = bus->cmn.entries_size;
-	list_size = bus->cmn.entries_size;
-	dump_mem = &bus->cmn.dumped_content;
-
-	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
-			bus->cmn.name);
-
-	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
-		return;
-
-	/* allocate memory for each test point */
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-		list_size += (head->block_cnt * head->test_pnt_cnt);
-	}
-
-	/* four 4-byte words for each test point */
-	list_size *= 16;
-
-	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
-	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
-	if (!in_log && !in_mem)
-		return;
-
-	if (in_mem) {
-		if (!(*dump_mem))
-			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-				list_size, &phys, GFP_KERNEL);
-
-		if (*dump_mem) {
-			dump_addr = *dump_mem;
-			dev_info(dpu_dbg_base.dev,
-				"%s: start_addr:0x%pK len:0x%x\n",
-				__func__, dump_addr, list_size);
-		} else {
-			in_mem = false;
-			pr_err("dump_mem: allocation fails\n");
-		}
-	}
-
-	_dpu_dbg_enable_power(true);
-
-	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
-	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
-	/* make sure that vbif core is on */
-	wmb();
-
-	/*
-	 * Extract VBIF error info based on XIN halt and error status.
-	 * If the XIN client is not in HALT state, or an error is detected,
-	 * then retrieve the VBIF error info for it.
-	 */
-	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
-	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
-	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
-	dev_err(dpu_dbg_base.dev,
-			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
-			reg, reg1, reg2);
-	reg >>= 16;
-	reg &= ~(reg1 | reg2);
-	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
-		if (!test_bit(0, &reg)) {
-			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
-			/* make sure reg write goes through */
-			wmb();
-
-			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
-			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
-			dev_err(dpu_dbg_base.dev,
-					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
-					i, d0, d1);
-		}
-		reg >>= 1;
-	}
-
-	for (i = 0; i < bus_size; i++) {
-		head = dbg_bus + i;
-
-		writel_relaxed(0, mem_base + head->disable_bus_addr);
-		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
-		/* make sure that other bus is off */
-		wmb();
-
-		_dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
-				in_log);
-		if (dump_addr)
-			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
-	}
-
-	_dpu_dbg_enable_power(false);
-
-	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
-			bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump array of register bases
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
-			    bool dump_dbgbus_vbif_rt)
-{
-	if (dump_dbgbus_dpu)
-		_dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
-	if (dump_dbgbus_vbif_rt)
-		_dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
-	_dpu_dump_array("dpudump_workitem",
-		dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
-		dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
-		  bool dump_dbgbus_vbif_rt)
-{
-	if (queue_work && work_pending(&dpu_dbg_base.dump_work))
-		return;
-
-	if (!queue_work) {
-		_dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
-		return;
-	}
-
-	/* schedule work to dump later */
-	dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
-	dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
-			dump_dbgbus_vbif_rt;
-	schedule_work(&dpu_dbg_base.dump_work);
-}
-
-/**
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
-	const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	_dpu_dump_array("dump_debugfs", true, true);
-	return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
-	.open = dpu_dbg_debugfs_open,
-	.write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-	char debug_name[80] = "";
-
-	if (!debugfs_root)
-		return -EINVAL;
-
-	debugfs_create_file("dump", 0600, debugfs_root, NULL,
-			&dpu_dbg_dump_fops);
-
-	if (dbg->dbgbus_dpu.entries) {
-		dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
-		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-				dbg->dbgbus_dpu.cmn.name);
-		dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
-		debugfs_create_u32(debug_name, 0600, debugfs_root,
-				&dbg->dbgbus_dpu.cmn.enable_mask);
-	}
-
-	if (dbg->dbgbus_vbif_rt.entries) {
-		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
-		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-				dbg->dbgbus_vbif_rt.cmn.name);
-		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-		debugfs_create_u32(debug_name, 0600, debugfs_root,
-				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
-	}
-
-	return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
-	memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
-	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
-	if (IS_MSM8998_TARGET(hwversion)) {
-		dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
-		dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
-		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-		dbg->dbgbus_vbif_rt.cmn.entries_size =
-				ARRAY_SIZE(vbif_dbg_bus_msm8998);
-	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
-		dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
-		dbg->dbgbus_dpu.cmn.entries_size =
-				ARRAY_SIZE(dbg_bus_dpu_sdm845);
-		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-		/* vbif is unchanged vs 8998 */
-		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-		dbg->dbgbus_vbif_rt.cmn.entries_size =
-				ARRAY_SIZE(vbif_dbg_bus_msm8998);
-	} else {
-		pr_err("unsupported chipset id %X\n", hwversion);
-	}
-}
-
-int dpu_dbg_init(struct device *dev)
-{
-	if (!dev) {
-		pr_err("invalid params\n");
-		return -EINVAL;
-	}
-
-	INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
-	dpu_dbg_base.dev = dev;
-
-	INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
-	return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
-	_dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-	dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644
index 1e6fa94..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
-	DPU_DBG_DUMP_IN_LOG = BIT(0),
-	DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
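-
-/*
- * The flags above are OR-able: as a hypothetical example, writing 3 to
- * the per-bus "<name>_dbgbus" debugfs file created by
- * dpu_dbg_debugfs_register() requests both a log dump and an in-memory
- * dump on the next dpu_dbg_dump() call.
- */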
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion:		Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev:		device handle
- * Returns:		0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root:	debugfs root in which to create dpu debug entries
- * Returns:	0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns:	none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work:	  whether to queue the dumping work to the work_struct
- * @name:	  string indicating origin of dump
- * @dump_dbgbus_dpu:	  whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt:  whether to dump the vbif rt debug bus
- * Returns:	none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
-		  bool dump_dbgbus_vbif_rt);
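-
-/*
- * A hypothetical call site, e.g. from a frame-done timeout handler,
- * deferring the register reads to the dump worker so they run outside
- * atomic context:
- *
- *	dpu_dbg_dump(true, __func__, true, true);
- */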
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- *	address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
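-
-/*
- * The offset set here is added to the matching reg base when
- * _dpu_dbg_dump_dpu_dbg_bus() resolves mem_base for the bus registers.
- */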
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
-	return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-	return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
-				bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
-#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d31d828..36158b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
  *	Virtual encoder defers as much as possible to the physical encoders.
  *	Virtual encoder registers itself with the DRM Framework as the encoder.
  * @base:		drm_encoder base class for registration with DRM
- * @enc_spin_lock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @bus_scaling_client:	Client handle to the bus scaling interface
+ * @enabled:		True if the encoder is active, protected by enc_lock
  * @num_phys_encs:	Actual number of physical encoders contained.
  * @phys_encs:		Container of physical encoders managed.
  * @cur_master:		Pointer to the current master in this mode. Optimization
@@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
  * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
  *			for partial update right-only cases, such as pingpong
  *			split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
- *			notification of the VBLANK
- * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
+ * @crtc:		Pointer to the currently assigned crtc. Normally you
+ *			would use crtc->state->encoder_mask to determine the
+ *			link between encoder/crtc. However in this case we need
+ *			to track crtc in the disable() hook which is called
+ *			_after_ encoder_mask is cleared.
  * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
  *				all CTL paths
  * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
  * @debugfs_root:		Debug file system root file node
- * @enc_lock:			Lock around physical encoder create/destroy and
-				access.
+ * @enc_lock:			Lock around physical encoder
+ *				create/destroy/enable/disable
  * @frame_busy_mask:		Bitmask tracking which phys_enc we are still
  *				busy processing current command.
  *				Bit0 = phys_encs[0] etc.
@@ -175,6 +178,8 @@ struct dpu_encoder_virt {
 	spinlock_t enc_spinlock;
 	uint32_t bus_scaling_client;
 
+	bool enabled;
+
 	unsigned int num_phys_encs;
 	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct dpu_encoder_phys *cur_master;
@@ -183,8 +188,7 @@ struct dpu_encoder_virt {
 
 	bool intfs_swapped;
 
-	void (*crtc_vblank_cb)(void *);
-	void *crtc_vblank_cb_data;
+	struct drm_crtc *crtc;
 
 	struct dentry *debugfs_root;
 	struct mutex enc_lock;
@@ -210,39 +214,6 @@ struct dpu_encoder_virt {
 };
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
-								bool enable)
-{
-	struct drm_encoder *drm_enc;
-	struct msm_drm_private *priv;
-	struct dpu_kms *dpu_kms;
-
-	if (!dpu_enc) {
-		DPU_ERROR("invalid dpu enc\n");
-		return -EINVAL;
-	}
-
-	drm_enc = &dpu_enc->base;
-	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
-		DPU_ERROR("drm device invalid\n");
-		return -EINVAL;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	if (!priv->kms) {
-		DPU_ERROR("invalid kms\n");
-		return -EINVAL;
-	}
-
-	dpu_kms = to_dpu_kms(priv->kms);
-
-	if (enable)
-		pm_runtime_get_sync(&dpu_kms->pdev->dev);
-	else
-		pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-	return 0;
-}
 
 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
 		enum dpu_intr_idx intr_idx)
@@ -1117,28 +1088,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
 }
 
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
 {
-	struct dpu_encoder_virt *dpu_enc = NULL;
-	int i;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 
-	if (!drm_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	mutex_lock(&dpu_enc->enc_lock);
 
-	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+	if (!dpu_enc->enabled)
+		goto out;
 
-		if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
-			phys->ops.restore(phys);
-	}
-
+	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
 	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
 		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
 
 	_dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+	mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
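
The renamed helper now takes enc_lock and restores hardware state only
for encoders that are logically enabled, so it is safe to call blindly
for every encoder on resume. A hedged sketch of a resume path that
could drive it (the function name and iteration are illustrative, not
the patch's actual call site):

static int example_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_encoder *encoder;

	/* disabled encoders bail out early under enc_lock */
	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return 0;
}
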
@@ -1152,6 +1119,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 		return;
 	}
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	mutex_lock(&dpu_enc->enc_lock);
 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
 
 	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1168,10 +1137,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 	if (ret) {
 		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
 				ret);
-		return;
+		goto out;
 	}
 
 	_dpu_encoder_virt_enable_helper(drm_enc);
+
+	dpu_enc->enabled = true;
+
+out:
+	mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1193,11 +1167,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 		return;
 	}
 
-	mode = &drm_enc->crtc->state->adjusted_mode;
-
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 	DPU_DEBUG_ENC(dpu_enc, "\n");
 
+	mutex_lock(&dpu_enc->enc_lock);
+	dpu_enc->enabled = false;
+
+	mode = &drm_enc->crtc->state->adjusted_mode;
+
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
 
@@ -1231,6 +1208,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
 	dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+	mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1261,8 +1240,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 
 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
-	if (dpu_enc->crtc_vblank_cb)
-		dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+	if (dpu_enc->crtc)
+		dpu_crtc_vblank_callback(dpu_enc->crtc);
 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 
 	atomic_inc(&phy_enc->vsync_cnt);
@@ -1282,25 +1261,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
 	DPU_ATRACE_END("encoder_underrun_callback");
 }
 
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
-		void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
 {
 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 	unsigned long lock_flags;
-	bool enable;
+
+	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+	/* crtc should always be cleared before re-assigning */
+	WARN_ON(crtc && dpu_enc->crtc);
+	dpu_enc->crtc = crtc;
+	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+					struct drm_crtc *crtc, bool enable)
+{
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	unsigned long lock_flags;
 	int i;
 
-	enable = vbl_cb ? true : false;
-
-	if (!drm_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
 	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
 
 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
-	dpu_enc->crtc_vblank_cb = vbl_cb;
-	dpu_enc->crtc_vblank_cb_data = vbl_data;
+	if (dpu_enc->crtc != crtc) {
+		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+		return;
+	}
 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
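
With the callback pair replaced by the cached crtc pointer, vblank
control no longer needs the caller to know which encoder is bound:
dpu_encoder_toggle_vblank_for_crtc() returns without side effects when
its cached crtc does not match. An illustrative caller with a
hypothetical name:

static int example_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/* encoders assigned to other crtcs ignore the request */
	drm_for_each_encoder(encoder, crtc->dev)
		dpu_encoder_toggle_vblank_for_crtc(encoder, crtc, true);

	return 0;
}
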
@@ -1405,8 +1391,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
  * phys: Pointer to physical encoder structure
  * extra_flush_bits: Additional bit mask to include in flush trigger
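+ * async: true if this is an asynchronous commit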
  */
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
-		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
+		bool async)
 {
 	struct dpu_hw_ctl *ctl;
 	int pending_kickoff_cnt;
@@ -1429,7 +1416,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
 		return;
 	}
 
-	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+	if (!async)
+		pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+	else
+		pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
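
The async branch deliberately reads instead of increments: an
asynchronous (cursor) commit generates no frame-done event, so bumping
the counter would leave one wait outstanding forever. A hedged sketch
of the waiter side that relies on this invariant (illustrative; the
real wait path goes through dpu_encoder_helper_wait_event_timeout()):

/* one frame-done is owed per increment, and async commits owe none */
static long example_wait_for_frame_done(struct dpu_encoder_phys *phys)
{
	return wait_event_timeout(phys->pending_kickoff_wq,
			atomic_read(&phys->pending_kickoff_cnt) == 0,
			msecs_to_jiffies(KICKOFF_TIMEOUT_MS));
}
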
@@ -1448,7 +1438,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
  * _dpu_encoder_trigger_start - trigger start for a physical encoder
  * phys: Pointer to physical encoder structure
  */
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
 {
 	if (!phys) {
 		DPU_ERROR("invalid argument(s)\n");
@@ -1505,7 +1495,7 @@ static int dpu_encoder_helper_wait_event_timeout(
 	return rc;
 }
 
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct dpu_hw_ctl *ctl;
@@ -1525,10 +1515,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
 		      ctl->idx);
 
 	rc = ctl->ops.reset(ctl);
-	if (rc) {
+	if (rc)
 		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
-		dpu_dbg_dump(false, __func__, true, true);
-	}
 
 	phys_enc->enable_state = DPU_ENC_ENABLED;
 }
@@ -1542,7 +1530,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
  *	a time.
  * dpu_enc: Pointer to virtual encoder structure
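+ * async: true if this is an asynchronous commit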
  */
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+				      bool async)
 {
 	struct dpu_hw_ctl *ctl;
 	uint32_t i, pending_flush;
@@ -1573,7 +1562,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
 			set_bit(i, dpu_enc->frame_busy_mask);
 		if (!phys->ops.needs_single_flush ||
 				!phys->ops.needs_single_flush(phys))
-			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+						   async);
 		else if (ctl->ops.get_pending_flush)
 			pending_flush |= ctl->ops.get_pending_flush(ctl);
 	}
@@ -1583,7 +1573,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
 		_dpu_encoder_trigger_flush(
 				&dpu_enc->base,
 				dpu_enc->cur_master,
-				pending_flush);
+				pending_flush, async);
 	}
 
 	_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1767,7 +1757,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
 }
 
 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
-		struct dpu_encoder_kickoff_params *params)
+		struct dpu_encoder_kickoff_params *params, bool async)
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct dpu_encoder_phys *phys;
@@ -1801,14 +1791,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 	if (needs_hw_reset) {
 		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
 		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-			phys = dpu_enc->phys_encs[i];
-			if (phys && phys->ops.hw_reset)
-				phys->ops.hw_reset(phys);
+			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
 		}
 	}
 }
 
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct dpu_encoder_phys *phys;
@@ -1831,7 +1819,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
 		((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
 
 	/* All phys encs are ready to go, trigger the kickoff */
-	_dpu_encoder_kickoff_phys(dpu_enc);
+	_dpu_encoder_kickoff_phys(dpu_enc, async);
 
 	/* allow phys encs to handle any post-kickoff business */
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1873,14 +1861,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
 #ifdef CONFIG_DEBUG_FS
 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 {
-	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_virt *dpu_enc = s->private;
 	int i;
 
-	if (!s || !s->private)
-		return -EINVAL;
-
-	dpu_enc = s->private;
-
 	mutex_lock(&dpu_enc->enc_lock);
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1918,7 +1901,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
 
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
-	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
 	int i;
@@ -1932,12 +1915,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
 	char name[DPU_NAME_SIZE];
 
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
 		DPU_ERROR("invalid encoder or kms\n");
 		return -EINVAL;
 	}
 
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
 
@@ -1962,26 +1944,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
 	return 0;
 }
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-	struct dpu_encoder_virt *dpu_enc;
-
-	if (!drm_enc)
-		return;
-
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
 #else
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
 	return 0;
 }
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
 #endif
 
 static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -1991,7 +1958,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
 
 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
 {
-	_dpu_encoder_destroy_debugfs(encoder);
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+	debugfs_remove_recursive(dpu_enc->debugfs_root);
 }
 
 static int dpu_encoder_virt_add_phys_encs(
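
Folding the debugfs teardown into early_unregister ties its lifetime to
DRM's register/unregister cycle instead of a driver-private destroy
path. A sketch of the hookup, assuming the surrounding funcs table
(dpu_encoder_destroy is assumed here and not shown in this hunk):

static const struct drm_encoder_funcs example_encoder_funcs = {
	.destroy		= dpu_encoder_destroy,
	.late_register		= dpu_encoder_late_register,	/* creates debugfs */
	.early_unregister	= dpu_encoder_early_unregister,	/* removes it */
};
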
@@ -2266,6 +2235,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
 	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
 
+	dpu_enc->enabled = false;
+
 	return &dpu_enc->base;
 }
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 9dbf38f..3f5dafe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
 				  struct dpu_encoder_hw_resources *hw_res);
 
 /**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- *	will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
  * @encoder:	encoder pointer
- * @cb:		callback pointer, provide NULL to deregister and disable IRQs
- * @data:	user data provided to callback
+ * @crtc:	crtc pointer
  */
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
-		void (*cb)(void *), void *data);
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+			     struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ *	the encoder is assigned to the given crtc
+ * @encoder:	encoder pointer
+ * @crtc:	crtc pointer
+ * @enable:	true if vblank should be enabled
+ */
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+					struct drm_crtc *crtc, bool enable);
 
 /**
  * dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
  *	Delayed: Block until next trigger can be issued.
  * @encoder:	encoder pointer
  * @params:	kickoff time parameters
+ * @async:	true if this is an asynchronous commit
  */
 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
-		struct dpu_encoder_kickoff_params *params);
+		struct dpu_encoder_kickoff_params *params, bool async);
 
 /**
  * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
  * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
+ * @async:	true if this is an asynchronous commit
  */
-void dpu_encoder_kickoff(struct drm_encoder *encoder);
+void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
 
 /**
  * dpu_encoder_wait_for_event - Waits for encoder events
@@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - restore the encoder configs on pm runtime resume
  * @encoder:	encoder pointer
  */
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
 
 /**
  * dpu_encoder_init - initialize virtual encoder object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 964efcc..44e6f8b6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
  * @handle_post_kickoff:	Do any necessary post-kickoff work
  * @trigger_start:		Process start event on physical encoder
  * @needs_single_flush:		Whether encoder slaves need to be flushed
- * @hw_reset:			Issue HW recovery such as CTL reset and clear
- *				DPU_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:		Handler to enable/disable all the encoder IRQs
  * @prepare_idle_pc:		phys encoder can update the vsync_enable status
  *                              on idle power collapse prepare
@@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
 	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
 	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
 	bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
-	void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
 	void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
 	void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
 	void (*restore)(struct dpu_encoder_phys *phys);
@@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
  */
 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
 
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- *	This helper function may be optionally specified by physical
- *	encoders if they require ctl hw reset. If state is currently
- *	DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
 static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
 		struct dpu_encoder_phys *phys_enc)
 {
@@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
 	dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
 
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-	    dpu_crtc_state_is_stereo(dpu_cstate))
+	    dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b2d7f0d..99ab5ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -44,14 +44,7 @@
 
 #define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
 
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
-		struct dpu_encoder_phys_cmd *cmd_enc)
-{
-	return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
-		struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
 {
 	return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
 }
@@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
 			  atomic_read(&phys_enc->pending_kickoff_cnt));
 
 		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
-		dpu_dbg_dump(false, __func__, true, true);
 	}
 
 	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
 	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
 
 	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
-		goto skip_flush;
+		return;
 
 	ctl = phys_enc->hw_ctl;
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
-	return;
 }
 
 static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
 
 	wait_info.wq = &cmd_enc->pending_vblank_wq;
 	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
-	wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
 
 	atomic_inc(&cmd_enc->pending_vblank_cnt);
 
@@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
 	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
 	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
 	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
-	ops->hw_reset = dpu_encoder_helper_hw_reset;
 	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
 	ops->restore = dpu_encoder_phys_cmd_enable_helper;
 	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 	if (!cmd_enc) {
 		ret = -ENOMEM;
 		DPU_ERROR("failed to allocate\n");
-		goto fail;
+		return ERR_PTR(ret);
 	}
 	phys_enc = &cmd_enc->base;
 	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 
 	return phys_enc;
 
-fail:
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 84de385..acdab5b0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
 	 */
 }
 
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
 {
 	u32 active = timing->xres;
 	u32 inactive =
@@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
 	return active + inactive;
 }
 
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
 {
 	u32 active = timing->yres;
 	u32 inactive =
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	if (hw_ctl && hw_ctl->ops.get_flush_register)
 		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
-	if (flush_register == 0)
+	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
 		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
 				-1, 0);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
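
Previously a kickoff was retired only when CTL_FLUSH read back as zero,
which stalls once unrelated bits stay latched (for instance while an
asynchronous cursor flush is still in flight). The new test masks the
readback with the software-pending bits instead. Schematically, with
made-up example values:

	/* flush_register = 0x00040000   unrelated bit still latched    */
	/* pending mask   = 0x00020000   the flush this vblank retires  */
	/* 0x00040000 & 0x00020000 == 0  -> our flush completed          */
	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
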
@@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
 		DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
 				ctl->idx, rc);
 		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
-		dpu_dbg_dump(false, __func__, true, true);
 	}
 }
 
@@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
 	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
 	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
-	ops->hw_reset = dpu_encoder_helper_hw_reset;
 	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
 }
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index d743e7c..0874f0a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
 			+ layout->plane_size[2] + layout->plane_size[3];
 
 		if (!meta)
-			goto done;
+			return 0;
 
 		/* configure Y metadata plane */
 		layout->plane_addr[2] = base_addr;
@@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
 		layout->plane_addr[1] = 0;
 
 		if (!meta)
-			goto done;
+			return 0;
 
 		layout->plane_addr[2] = base_addr;
 		layout->plane_addr[3] = 0;
 	}
-done:
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
index 58d29e4..92f1c42 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
  * @type: hw block type - enum dpu_hw_blk_type
  * @id: instance id of the hw block
  * @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
  */
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
 		struct dpu_hw_blk_ops *ops)
 {
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
-	}
-
 	INIT_LIST_HEAD(&hw_blk->list);
 	hw_blk->type = type;
 	hw_blk->id = id;
@@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
 	mutex_lock(&dpu_hw_blk_lock);
 	list_add(&hw_blk->list, &dpu_hw_blk_list);
 	mutex_unlock(&dpu_hw_blk_lock);
-
-	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 0f4ca8af..1934c2f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -44,7 +44,7 @@ struct dpu_hw_blk {
 	struct dpu_hw_blk_ops ops;
 };
 
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
 		struct dpu_hw_blk_ops *ops);
 void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
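
Since every caller hands in a valid embedded dpu_hw_blk, the init step
cannot fail any more, and each *_init() function below sheds its
blk_init_error unwind. The resulting caller shape, sketched after the
dpu_hw_ctl_init() hunk further down:

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	/* ... fill in cfg and ops ... */

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;	/* no unwind path needed for the blk any more */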
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index dc060e7..144358a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
  */
 void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
 
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg:          pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
-	return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
-			 test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
 #endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index eec1051..1068b4b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -13,8 +13,8 @@
 #include <linux/delay.h>
 #include "dpu_hwio.h"
 #include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
+#include "dpu_trace.h"
 
 #define   CTL_LAYER(lm)                 \
 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
 	return stages;
 }
 
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, CTL_FLUSH);
+}
+
 static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
 {
+	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+				       dpu_hw_ctl_get_flush_register(ctx));
 	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
 static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
 {
+	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+					 dpu_hw_ctl_get_flush_register(ctx));
 	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
 }
 
 static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
 {
+	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+				     dpu_hw_ctl_get_flush_register(ctx));
 	ctx->pending_flush_mask = 0x0;
 }
 
 static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
 		u32 flushbits)
 {
+	trace_dpu_hw_ctl_update_pending_flush(flushbits,
+					      ctx->pending_flush_mask);
 	ctx->pending_flush_mask |= flushbits;
 }
 
@@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
 
 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
 {
-
+	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+				     dpu_hw_ctl_get_flush_register(ctx));
 	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
 }
 
-static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
-{
-	struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
-	return DPU_REG_READ(c, CTL_FLUSH);
-}
-
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
 	enum dpu_sspp sspp)
 {
 	uint32_t flushbits = 0;
@@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
 	return flushbits;
 }
 
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
 	enum dpu_lm lm)
 {
 	uint32_t flushbits = 0;
@@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
 	return flushbits;
 }
 
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
 		u32 *flushbits, enum dpu_intf intf)
 {
 	switch (intf) {
@@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
 	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 		void __iomem *addr,
@@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 {
 	struct dpu_hw_ctl *c;
 	struct dpu_ctl_cfg *cfg;
-	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 	c->mixer_count = m->mixer_count;
 	c->mixer_hw_caps = m->mixer;
 
-	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
+	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
 
 	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 9c6bba0..f6a83da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define INTF_TIMING_ENGINE_EN           0x000
@@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
 	ops->get_line_count = dpu_hw_intf_get_line_count;
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 		void __iomem *addr,
@@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 {
 	struct dpu_hw_intf *c;
 	struct dpu_intf_cfg *cfg;
-	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 	c->mdss = m;
 	_setup_intf_ops(&c->ops, c->cap->features);
 
-	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
+	dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
 
 	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 3b77df4..a2b0dbc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -92,16 +92,6 @@ struct dpu_hw_intf {
 };
 
 /**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
-	return container_of(hw, struct dpu_hw_intf, base);
-}
-
-/**
  * dpu_hw_intf_init(): Initializes the intf driver for the passed
  * interface idx.
  * @idx:  interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index acb8dc8..018df2c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define LM_OP_MODE                        0x00
@@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
 static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
 {
 	const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
-	int rc;
+	if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+		return sblk->blendstage_base[stage - DPU_STAGE_0];
 
-	if (stage == DPU_STAGE_BASE)
-		rc = -EINVAL;
-	else if (stage <= sblk->maxblendstages)
-		rc = sblk->blendstage_base[stage - DPU_STAGE_0];
-	else
-		rc = -EINVAL;
-
-	return rc;
+	return -EINVAL;
 }
 
 static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
 	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
 }
 
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
-			void *cfg)
-{
-}
-
 static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
 		struct dpu_hw_lm_ops *ops,
 		unsigned long features)
@@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
 		ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
 	ops->setup_alpha_out = dpu_hw_lm_setup_color3;
 	ops->setup_border_color = dpu_hw_lm_setup_border_color;
-	ops->setup_gc = dpu_hw_lm_gc;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 		void __iomem *addr,
@@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 {
 	struct dpu_hw_mixer *c;
 	struct dpu_lm_cfg *cfg;
-	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 	c->cap = cfg;
 	_setup_mixer_ops(m, &c->ops, c->cap->features);
 
-	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
+	dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
 
 	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 5b036ac..6aee839 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
 	void (*setup_border_color)(struct dpu_hw_mixer *ctx,
 		struct dpu_mdss_color *color,
 		u8 border_en);
-	/**
-	 * setup_gc : enable/disable gamma correction feature
-	 */
-	void (*setup_gc)(struct dpu_hw_mixer *mixer,
-			void *cfg);
 };
 
 struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index cc3a623..3bdf47e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -16,7 +16,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 #include "dpu_trace.h"
 
@@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
 	height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
 
 	if (height < init)
-		goto line_count_exit;
+		return line;
 
 	line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
 
@@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
 	else
 		line -= init;
 
-line_count_exit:
 	return line;
 }
 
@@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
 	ops->get_line_count = dpu_hw_pp_get_line_count;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 		void __iomem *addr,
@@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 {
 	struct dpu_hw_pingpong *c;
 	struct dpu_pingpong_cfg *cfg;
-	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 	c->caps = cfg;
 	_setup_pingpong_ops(&c->ops, c->caps);
 
-	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
+	dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
 
 	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3caccd7..0e02e43 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -105,16 +105,6 @@ struct dpu_hw_pingpong {
 };
 
 /**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
-	return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
-/**
  * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
  *	pingpong idx.
  * @idx:  Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index c25b52a..e9132bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -14,7 +14,6 @@
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
@@ -141,7 +140,7 @@
 /* traffic shaper clock in Hz */
 #define TS_CLK			19200000
 
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
 {
@@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
 		test_bit(DPU_SSPP_CSC_10BIT, &features))
 		c->ops.setup_csc = dpu_hw_sspp_setup_csc;
 
-	if (dpu_hw_sspp_multirect_enabled(c->cap))
+	if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+		test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
 		c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
 
 	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
 	return ERR_PTR(-ENOMEM);
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
 		void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
 {
 	struct dpu_hw_pipe *hw_pipe;
 	struct dpu_sspp_cfg *cfg;
-	int rc;
 
 	if (!addr || !catalog)
 		return ERR_PTR(-EINVAL);
@@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
 	hw_pipe->cap = cfg;
 	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
 
-	rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
+	dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
 
 	return hw_pipe;
-
-blk_init_error:
-	kzfree(hw_pipe);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4d81e5f..119b4e1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -392,16 +392,6 @@ struct dpu_hw_pipe {
 };
 
 /**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
-	return container_of(hw, struct dpu_hw_pipe, base);
-}
-
-/**
  * dpu_hw_sspp_init - initializes the sspp hw driver object.
  * Should be called once before accessing every pipe.
  * @idx:  Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index b878125..a041597 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_top.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define SSPP_SPARE                        0x28
@@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
 	return ERR_PTR(-EINVAL);
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-	.start = NULL,
-	.stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
 		void __iomem *addr,
@@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
 {
 	struct dpu_hw_mdp *mdp;
 	const struct dpu_mdp_cfg *cfg;
-	int rc;
 
 	if (!addr || !m)
 		return ERR_PTR(-EINVAL);
@@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
 	mdp->caps = cfg;
 	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
 
-	rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
-	if (rc) {
-		DPU_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+	dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
 
 	return mdp;
-
-blk_init_error:
-	kzfree(mdp);
-
-	return ERR_PTR(rc);
 }
 
 void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 192e338..aa21fd8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -161,16 +161,6 @@ struct dpu_hw_mdp {
 };
 
 /**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
-	return container_of(hw, struct dpu_hw_mdp, base);
-}
-
-/**
  * dpu_hw_mdptop_init - initializes the top driver for the passed idx
  * @idx:  Interface index for which driver object is required
  * @addr: Mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index d439055..38bfd22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
 
 #define VBIF_VERSION			0x0000
 #define VBIF_CLK_FORCE_CTRL0		0x0008
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index b557687..78833c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -16,6 +16,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 
+#include <drm/drm_print.h>
+
 #include "dpu_io_util.h"
 
 void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
 						   "clock-names", i,
 						   &clock_name);
 		if (rc) {
-			dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+			DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
 				i);
 			break;
 		}
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
 
 	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
 	if (rc) {
-		dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+		DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
 		goto err;
 	}
 
 	rc = of_clk_set_defaults(pdev->dev.of_node, false);
 	if (rc) {
-		dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+		DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
 		goto err;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644
index d5e6ce0..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
-	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-	return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
-	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-	if (!dpu_kms->dev || !dpu_kms->dev->dev) {
-		pr_err("invalid device handles\n");
-		return;
-	}
-
-	dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
-	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-	int rc;
-
-	if (!kms) {
-		DPU_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	rc = dpu_core_irq_postinstall(dpu_kms);
-
-	return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
-	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-	if (!kms) {
-		DPU_ERROR("invalid parameters\n");
-		return;
-	}
-
-	dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644
index 3e147f7..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask:	enable status of MDSS level interrupt
- * @domain:		interrupt domain of this controller
- */
-struct dpu_irq_controller {
-	unsigned long enabled_mask;
-	struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms:		pointer to kms context
- * @return:		none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms:		pointer to kms context
- * @return:		0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev:		pointer to kms context
- * @return:		none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms:		pointer to kms context
- * @return:		interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 0a683e6..4d67b3c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
 	struct dpu_danger_safe_status status;
 	int i;
 
-	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+	if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
 		DPU_ERROR("invalid arg(s)\n");
 		return 0;
 	}
@@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
 }
 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
 
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
-	debugfs_remove_recursive(dpu_kms->debugfs_danger);
-	dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
 		struct dentry *parent)
 {
-	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
-			parent);
-	if (!dpu_kms->debugfs_danger) {
-		DPU_ERROR("failed to create danger debugfs\n");
-		return -EINVAL;
-	}
+	struct dentry *entry = debugfs_create_dir("danger", parent);
+	if (IS_ERR_OR_NULL(entry))
+		return;
 
-	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+	debugfs_create_file("danger_status", 0600, entry,
 			dpu_kms, &dpu_debugfs_danger_stats_fops);
-	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+	debugfs_create_file("safe_status", 0600, entry,
 			dpu_kms, &dpu_debugfs_safe_stats_fops);
-
-	return 0;
 }
 
 static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
 {
-	struct dpu_debugfs_regset32 *regset;
-	struct dpu_kms *dpu_kms;
+	struct dpu_debugfs_regset32 *regset = s->private;
+	struct dpu_kms *dpu_kms = regset->dpu_kms;
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	void __iomem *base;
 	uint32_t i, addr;
 
-	if (!s || !s->private)
-		return 0;
-
-	regset = s->private;
-
-	dpu_kms = regset->dpu_kms;
-	if (!dpu_kms || !dpu_kms->mmio)
+	if (!dpu_kms->mmio)
 		return 0;
 
 	dev = dpu_kms->dev;
@@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
 
 static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
 {
-	void *p;
-	int rc;
+	void *p = dpu_hw_util_get_log_mask_ptr();
+	struct dentry *entry;
 
-	p = dpu_hw_util_get_log_mask_ptr();
-
-	if (!dpu_kms || !p)
+	if (!p)
 		return -EINVAL;
 
-	dpu_kms->debugfs_root = debugfs_create_dir("debug",
-					   dpu_kms->dev->primary->debugfs_root);
-	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
-		DRM_ERROR("debugfs create_dir failed %ld\n",
-			  PTR_ERR(dpu_kms->debugfs_root));
-		return PTR_ERR(dpu_kms->debugfs_root);
-	}
-
-	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
-	if (rc) {
-		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
-		return rc;
-	}
+	entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(entry))
+		return -ENODEV;
 
 	/* allow root to be NULL */
-	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
 
-	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
-	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
-	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+	dpu_debugfs_danger_init(dpu_kms, entry);
+	dpu_debugfs_vbif_init(dpu_kms, entry);
+	dpu_debugfs_core_irq_init(dpu_kms, entry);
 
-	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
-	if (rc) {
-		DPU_ERROR("failed to init perf %d\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
-	/* don't need to NULL check debugfs_root */
-	if (dpu_kms) {
-		dpu_debugfs_vbif_destroy(dpu_kms);
-		dpu_debugfs_danger_destroy(dpu_kms);
-		dpu_debugfs_core_irq_destroy(dpu_kms);
-		debugfs_remove_recursive(dpu_kms->debugfs_root);
-	}
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+	return dpu_core_perf_debugfs_init(dpu_kms, entry);
 }
 #endif
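
The rewrite follows the usual kernel convention that debugfs failure is
never fatal: creation helpers return void, only a local dentry is kept
for parenting, and on error the optional files are simply skipped. The
pattern in sketch form, with hypothetical names:

static u32 example_mask;	/* illustrative debugfs target */

static void example_debugfs_init(struct dentry *parent)
{
	struct dentry *dir = debugfs_create_dir("example", parent);

	if (IS_ERR_OR_NULL(dir))
		return;		/* debugfs disabled or failed: not an error */

	debugfs_create_x32("mask", 0600, dir, &example_mask);
}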
 
@@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 	struct dpu_kms *dpu_kms;
 	struct msm_drm_private *priv;
 	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 	struct drm_encoder *encoder;
+	int i;
 
 	if (!kms)
 		return;
@@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 	priv = dev->dev_private;
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc != NULL)
+	/* Call prepare_commit for all affected encoders */
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		drm_for_each_encoder_mask(encoder, crtc->dev,
+					  crtc_state->encoder_mask) {
 			dpu_encoder_prepare_commit(encoder);
+		}
+	}
 }
 
 /*
@@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 void dpu_kms_encoder_enable(struct drm_encoder *encoder)
 {
 	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
-	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc;
 
 	/* Forward this enable call to the commit hook */
 	if (funcs && funcs->commit)
 		funcs->commit(encoder);
 
-	if (crtc && crtc->state->active) {
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	drm_for_each_crtc(crtc, dev) {
+		if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
+			continue;
+
 		trace_dpu_kms_enc_enable(DRMID(crtc));
-		dpu_crtc_commit_kickoff(crtc);
+		dpu_crtc_commit_kickoff(crtc, false);
 	}
 }
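
Both hunks above drop the legacy encoder->crtc pointer, which atomic
drivers must not rely on, in favour of crtc->state->encoder_mask. The
lookup generalizes to a small helper, shown here purely for
illustration:

static struct drm_crtc *example_encoder_get_crtc(struct drm_encoder *encoder)
{
	struct drm_crtc *crtc;

	/* caller holds connection_mutex, as the WARN_ON above checks */
	drm_for_each_crtc(crtc, encoder->dev)
		if (crtc->state->encoder_mask & drm_encoder_mask(encoder))
			return crtc;

	return NULL;
}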
 
@@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 
 		if (crtc->state->active) {
 			trace_dpu_kms_commit(DRMID(crtc));
-			dpu_crtc_commit_kickoff(crtc);
+			dpu_crtc_commit_kickoff(crtc,
+						state->legacy_cursor_update);
 		}
 	}
 }
@@ -613,22 +576,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
 #ifdef CONFIG_DEBUG_FS
 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 {
-	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-	struct drm_device *dev;
-	int rc;
-
-	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return -EINVAL;
-	}
-
-	dev = dpu_kms->dev;
-
-	rc = _dpu_debugfs_init(dpu_kms);
-	if (rc)
-		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
-	return rc;
+	return _dpu_debugfs_init(to_dpu_kms(kms));
 }
 #endif
 
@@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
 		dpu_hw_intr_destroy(dpu_kms->hw_intr);
 	dpu_kms->hw_intr = NULL;
 
-	if (dpu_kms->power_event)
-		dpu_power_handle_unregister_event(
-				&dpu_kms->phandle, dpu_kms->power_event);
-
 	/* safe to call these more than once during shutdown */
-	_dpu_debugfs_destroy(dpu_kms);
 	_dpu_kms_mmu_destroy(dpu_kms);
 
 	if (dpu_kms->catalog) {
@@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
 		dpu_hw_catalog_deinit(dpu_kms->catalog);
 	dpu_kms->catalog = NULL;
 
-	if (dpu_kms->core_client)
-		dpu_power_client_destroy(&dpu_kms->phandle,
-			dpu_kms->core_client);
-	dpu_kms->core_client = NULL;
-
 	if (dpu_kms->vbif[VBIF_NRT])
 		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
 	dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
 
 	dpu_kms = to_dpu_kms(kms);
 
-	dpu_dbg_destroy();
 	_dpu_kms_hw_destroy(dpu_kms);
 }
 
-static int dpu_kms_pm_suspend(struct device *dev)
-{
-	struct drm_device *ddev;
-	struct drm_modeset_acquire_ctx ctx;
-	struct drm_atomic_state *state;
-	struct dpu_kms *dpu_kms;
-	int ret = 0, num_crtcs = 0;
-
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev_to_msm_kms(ddev))
-		return -EINVAL;
-
-	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
-	/* disable hot-plug polling */
-	drm_kms_helper_poll_disable(ddev);
-
-	/* acquire modeset lock(s) */
-	drm_modeset_acquire_init(&ctx, 0);
-
-retry:
-	DPU_ATRACE_BEGIN("kms_pm_suspend");
-
-	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
-	if (ret)
-		goto unlock;
-
-	/* save current state for resume */
-	if (dpu_kms->suspend_state)
-		drm_atomic_state_put(dpu_kms->suspend_state);
-	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
-	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
-		DRM_ERROR("failed to back up suspend state\n");
-		dpu_kms->suspend_state = NULL;
-		goto unlock;
-	}
-
-	/* create atomic state to disable all CRTCs */
-	state = drm_atomic_state_alloc(ddev);
-	if (IS_ERR_OR_NULL(state)) {
-		DRM_ERROR("failed to allocate crtc disable state\n");
-		goto unlock;
-	}
-
-	state->acquire_ctx = &ctx;
-
-	/* check for nothing to do */
-	if (num_crtcs == 0) {
-		DRM_DEBUG("all crtcs are already in the off state\n");
-		drm_atomic_state_put(state);
-		goto suspended;
-	}
-
-	/* commit the "disable all" state */
-	ret = drm_atomic_commit(state);
-	if (ret < 0) {
-		DRM_ERROR("failed to disable crtcs, %d\n", ret);
-		drm_atomic_state_put(state);
-		goto unlock;
-	}
-
-suspended:
-	dpu_kms->suspend_block = true;
-
-unlock:
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	DPU_ATRACE_END("kms_pm_suspend");
-	return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
-	struct drm_device *ddev;
-	struct dpu_kms *dpu_kms;
-	int ret;
-
-	if (!dev)
-		return -EINVAL;
-
-	ddev = dev_get_drvdata(dev);
-	if (!ddev || !ddev_to_msm_kms(ddev))
-		return -EINVAL;
-
-	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
-	DPU_ATRACE_BEGIN("kms_pm_resume");
-
-	drm_mode_config_reset(ddev);
-
-	drm_modeset_lock_all(ddev);
-
-	dpu_kms->suspend_block = false;
-
-	if (dpu_kms->suspend_state) {
-		dpu_kms->suspend_state->acquire_ctx =
-			ddev->mode_config.acquire_ctx;
-		ret = drm_atomic_commit(dpu_kms->suspend_state);
-		if (ret < 0) {
-			DRM_ERROR("failed to restore state, %d\n", ret);
-			drm_atomic_state_put(dpu_kms->suspend_state);
-		}
-		dpu_kms->suspend_state = NULL;
-	}
-	drm_modeset_unlock_all(ddev);
-
-	/* enable hot-plug polling */
-	drm_kms_helper_poll_enable(ddev);
-
-	DPU_ATRACE_END("kms_pm_resume");
-	return 0;
-}
-
 static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
 				 struct drm_encoder *encoder,
 				 bool cmd_mode)
@@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
 			encoder->base.id, rc);
 }
 
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	dpu_core_irq_uninstall(dpu_kms);
+}
+
 static const struct msm_kms_funcs kms_funcs = {
 	.hw_init         = dpu_kms_hw_init,
 	.irq_preinstall  = dpu_irq_preinstall,
-	.irq_postinstall = dpu_irq_postinstall,
 	.irq_uninstall   = dpu_irq_uninstall,
 	.irq             = dpu_irq,
 	.prepare_commit  = dpu_kms_prepare_commit,
@@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
 	.check_modified_format = dpu_format_check_modified_format,
 	.get_format      = dpu_get_msm_format,
 	.round_pixclk    = dpu_kms_round_pixclk,
-	.pm_suspend      = dpu_kms_pm_suspend,
-	.pm_resume       = dpu_kms_pm_resume,
 	.destroy         = dpu_kms_destroy,
 	.set_encoder_mode = _dpu_kms_set_encoder_mode,
 #ifdef CONFIG_DEBUG_FS
@@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
 #endif
 };
 
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
-	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
 {
 	struct msm_mmu *mmu;
@@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 	if (!domain)
 		return 0;
 
+	domain->geometry.aperture_start = 0x1000;
+	domain->geometry.aperture_end = 0xffffffff;
+
 	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
 			domain, "dpu1");
 	if (IS_ERR(aspace)) {
@@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
 	return clk_get_rate(clk->clk);
 }
 
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
-	struct dpu_kms *dpu_kms = usr;
-
-	if (!dpu_kms)
-		return;
-
-	dpu_vbif_init_memtypes(dpu_kms);
-}
-
 static int dpu_kms_hw_init(struct msm_kms *kms)
 {
 	struct dpu_kms *dpu_kms;
@@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
 	if (!kms) {
 		DPU_ERROR("invalid kms\n");
-		goto end;
+		return rc;
 	}
 
 	dpu_kms = to_dpu_kms(kms);
 	dev = dpu_kms->dev;
 	if (!dev) {
 		DPU_ERROR("invalid device\n");
-		goto end;
-	}
-
-	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
-	if (rc) {
-		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
-		goto end;
+		return rc;
 	}
 
 	priv = dev->dev_private;
 	if (!priv) {
 		DPU_ERROR("invalid private data\n");
-		goto dbg_destroy;
+		return rc;
 	}
 
 	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
 	}
 
-	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
-					"core");
-	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
-		rc = PTR_ERR(dpu_kms->core_client);
-		if (!dpu_kms->core_client)
-			rc = -EINVAL;
-		DPU_ERROR("dpu power client create failed: %d\n", rc);
-		dpu_kms->core_client = NULL;
-		goto error;
-	}
-
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
-	_dpu_kms_core_hw_rev_init(dpu_kms);
+	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
 
 	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
 
@@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 		goto power_error;
 	}
 
-	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
 	/*
 	 * Now we need to read the HW catalog and initialize resources such as
 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 	}
 
 	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
-			&dpu_kms->phandle,
 			_dpu_kms_get_clk(dpu_kms, "core"));
 	if (rc) {
 		DPU_ERROR("failed to init perf %d\n", rc);
@@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
-	/*
-	 * Handle (re)initializations during power enable
-	 */
-	dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
-	dpu_kms->power_event = dpu_power_handle_register_event(
-			&dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
-			dpu_kms_handle_power_event, dpu_kms, "kms");
+	dpu_vbif_init_memtypes(dpu_kms);
 
 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
@@ -1171,9 +966,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
 error:
 	_dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
-	dpu_dbg_destroy();
-end:
+
 	return rc;
 }
 
@@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 	}
 
-	dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
 	platform_set_drvdata(pdev, dpu_kms);
 
 	msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
 	struct dss_module_power *mp = &dpu_kms->mp;
 
-	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
 	devm_kfree(&pdev->dev, mp->clk_config);
 	mp->num_clk = 0;
@@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
 	ddev = dpu_kms->dev;
 	if (!ddev) {
 		DPU_ERROR("invalid drm_device\n");
-		goto exit;
+		return rc;
 	}
 
-	rc = dpu_power_resource_enable(&dpu_kms->phandle,
-			dpu_kms->core_client, false);
-	if (rc)
-		DPU_ERROR("resource disable failed: %d\n", rc);
-
 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
 	if (rc)
 		DPU_ERROR("clock disable failed rc:%d\n", rc);
 
-exit:
 	return rc;
 }
 
@@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 	int rc = -1;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_encoder *encoder;
 	struct drm_device *ddev;
 	struct dss_module_power *mp = &dpu_kms->mp;
 
 	ddev = dpu_kms->dev;
 	if (!ddev) {
 		DPU_ERROR("invalid drm_device\n");
-		goto exit;
+		return rc;
 	}
 
 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
 	if (rc) {
 		DPU_ERROR("clock enable failed rc:%d\n", rc);
-		goto exit;
+		return rc;
 	}
 
-	rc = dpu_power_resource_enable(&dpu_kms->phandle,
-			dpu_kms->core_client, true);
-	if (rc)
-		DPU_ERROR("resource enable failed: %d\n", rc);
+	dpu_vbif_init_memtypes(dpu_kms);
 
-exit:
+	drm_for_each_encoder(encoder, ddev)
+		dpu_encoder_virt_runtime_resume(encoder);
+
 	return rc;
 }
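
For illustration (not part of the patch): with dpu_power_handle removed,
re-programming after a power collapse hangs entirely off runtime PM, via the
dpu_runtime_suspend()/dpu_runtime_resume() callbacks above. A minimal sketch
of how such callbacks are wired up; the probe/remove names are hypothetical:

	static const struct dev_pm_ops dpu_pm_ops = {
		SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	};

	static struct platform_driver dpu_driver = {
		.probe = dpu_dev_probe,		/* hypothetical name */
		.remove = dpu_dev_remove,	/* hypothetical name */
		.driver = {
			.name = "msm_dpu",
			.pm = &dpu_pm_ops,
		},
	};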
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 66d4666..ac75cfc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -23,15 +23,13 @@
 #include "msm_kms.h"
 #include "msm_mmu.h"
 #include "msm_gem.h"
-#include "dpu_dbg.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_interrupts.h"
 #include "dpu_hw_top.h"
+#include "dpu_io_util.h"
 #include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
 #include "dpu_core_perf.h"
 
 #define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -104,7 +102,6 @@ struct dpu_irq {
 	atomic_t *enable_counts;
 	atomic_t *irq_counts;
 	spinlock_t cb_lock;
-	struct dentry *debugfs_file;
 };
 
 struct dpu_kms {
@@ -113,15 +110,6 @@ struct dpu_kms {
 	int core_rev;
 	struct dpu_mdss_cfg *catalog;
 
-	struct dpu_power_handle phandle;
-	struct dpu_power_client *core_client;
-	struct dpu_power_event *power_event;
-
-	/* directory entry for debugfs */
-	struct dentry *debugfs_root;
-	struct dentry *debugfs_danger;
-	struct dentry *debugfs_vbif;
-
 	/* io/register spaces: */
 	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
 	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +123,6 @@ struct dpu_kms {
 
 	struct dpu_core_perf perf;
 
-	/* saved atomic state during system suspend */
-	struct drm_atomic_state *suspend_state;
-	bool suspend_block;
-
 	struct dpu_rm rm;
 	bool rm_init;
 
@@ -164,33 +148,6 @@ struct vsync_info {
 		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
 
 /**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
-	if (!ddev_to_msm_kms(dev))
-		return false;
-
-	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- *				suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
-	if (!dpu_kms_is_suspend_state(dev))
-		return false;
-
-	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
  * Debugfs functions - extra helper functions for debugfs support
  *
  * Main debugfs documentation is located at,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 2235ef8..cb307a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -9,6 +9,11 @@
 
 #define HW_INTR_STATUS			0x0010
 
+struct dpu_irq_controller {
+	unsigned long enabled_mask;
+	struct irq_domain *domain;
+};
+
 struct dpu_mdss {
 	struct msm_mdss base;
 	void __iomem *mmio;
@@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
 	return 0;
 }
 
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
 {
 	if (dpu_mdss->irq_controller.domain) {
 		irq_domain_remove(dpu_mdss->irq_controller.domain);
 		dpu_mdss->irq_controller.domain = NULL;
 	}
-	return 0;
 }
 static int dpu_mdss_enable(struct msm_mdss *mdss)
 {
@@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
 
+	pm_runtime_suspend(dev->dev);
+	pm_runtime_disable(dev->dev);
 	_dpu_mdss_irq_domain_fini(dpu_mdss);
-
 	free_irq(platform_get_irq(pdev, 0), dpu_mdss);
-
 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
 	devm_kfree(&pdev->dev, mp->clk_config);
 
 	if (dpu_mdss->mmio)
 		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
 	dpu_mdss->mmio = NULL;
-
-	pm_runtime_disable(dev->dev);
 	priv->mdss = NULL;
 }
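
For illustration (not part of the patch), the teardown-ordering rule the
dpu_mdss_destroy() hunk applies — quiesce runtime PM first, then free the
resources its callbacks may still touch:

	/* sketch only; the "example_" name is illustrative */
	static void example_destroy(struct device *dev)
	{
		pm_runtime_suspend(dev);	/* force the device idle now */
		pm_runtime_disable(dev);	/* no more PM callbacks can run */
		/* ...now safe to free IRQs, remove the irq_domain, iounmap... */
	}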
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f549daf..fd75870 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
  * @src_width:		width of source buffer
  * Return: fill level corresponding to the source buffer/format or 0 if error
  */
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
 		const struct dpu_format *fmt, u32 src_width)
 {
 	struct dpu_plane *pdpu, *tmp;
@@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
 	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
 }
 
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
-		struct dpu_plane *pdpu)
-{
-	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
-
-	return kms->base.aspace;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
 		struct dpu_plane_state *pstate,
 		struct dpu_hw_pipe_cfg *pipe_cfg,
 		struct drm_framebuffer *fb)
 {
 	struct dpu_plane *pdpu = to_dpu_plane(plane);
-	struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+	struct msm_gem_address_space *aspace = kms->base.aspace;
 	int ret;
 
 	ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
 	scale_cfg->enable = 1;
 }
 
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
 {
 	static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
 		{
@@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	struct drm_gem_object *obj;
 	struct msm_gem_object *msm_obj;
 	struct dma_fence *fence;
-	struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 	int ret;
 
 	if (!new_state->fb)
@@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
 
 	/* cache aspace */
-	pstate->aspace = aspace;
+	pstate->aspace = kms->base.aspace;
 
 	/*
 	 * TODO: Need to sort out the msm_framebuffer_prepare() call below so
@@ -1179,8 +1169,6 @@ static void dpu_plane_destroy(struct drm_plane *plane)
 
 		mutex_destroy(&pdpu->lock);
 
-		drm_plane_helper_disable(plane, NULL);
-
 		/* this will destroy the states as well */
 		drm_plane_cleanup(plane);
 
@@ -1193,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
 static void dpu_plane_destroy_state(struct drm_plane *plane,
 		struct drm_plane_state *state)
 {
-	struct dpu_plane_state *pstate;
-
-	if (!plane || !state) {
-		DPU_ERROR("invalid arg(s), plane %d state %d\n",
-				plane != 0, state != 0);
-		return;
-	}
-
-	pstate = to_dpu_plane_state(state);
-
 	__drm_atomic_helper_plane_destroy_state(state);
-
-	kfree(pstate);
+	kfree(to_dpu_plane_state(state));
 }
 
 static struct drm_plane_state *
@@ -1275,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {
 	struct dpu_kms *kms = file->private_data;
-	struct dpu_mdss_cfg *cfg = kms->catalog;
-	int len = 0;
-	char buf[40] = {'\0'};
+	int len;
+	char buf[40];
 
-	if (!cfg)
-		return -ENODEV;
+	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
 
-	if (*ppos)
-		return 0; /* the end */
-
-	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
-	if (len < 0 || len >= sizeof(buf))
-		return 0;
-
-	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-		return -EFAULT;
-
-	*ppos += len;   /* increase offset */
-
-	return len;
+	return simple_read_from_buffer(buff, count, ppos, buf, len);
 }
 
 static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1324,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
 		    const char __user *user_buf, size_t count, loff_t *ppos)
 {
 	struct dpu_kms *kms = file->private_data;
-	struct dpu_mdss_cfg *cfg = kms->catalog;
 	int disable_panic;
-	char buf[10];
+	int ret;
 
-	if (!cfg)
-		return -EFAULT;
-
-	if (count >= sizeof(buf))
-		return -EFAULT;
-
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	buf[count] = 0;	/* end of string */
-
-	if (kstrtoint(buf, 0, &disable_panic))
-		return -EFAULT;
+	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+	if (ret)
+		return ret;
 
 	if (disable_panic) {
 		/* Disable panic signal for all active pipes */
@@ -1365,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
 
 static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 {
-	struct dpu_plane *pdpu;
-	struct dpu_kms *kms;
-	struct msm_drm_private *priv;
-	const struct dpu_sspp_sub_blks *sblk = 0;
-	const struct dpu_sspp_cfg *cfg = 0;
-
-	if (!plane || !plane->dev) {
-		DPU_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	priv = plane->dev->dev_private;
-	if (!priv || !priv->kms) {
-		DPU_ERROR("invalid KMS reference\n");
-		return -EINVAL;
-	}
-
-	kms = to_dpu_kms(priv->kms);
-	pdpu = to_dpu_plane(plane);
-
-	if (pdpu && pdpu->pipe_hw)
-		cfg = pdpu->pipe_hw->cap;
-	if (cfg)
-		sblk = cfg->sblk;
-
-	if (!sblk)
-		return 0;
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+	const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+	const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
 
 	/* create overall sub-directory for the pipe */
 	pdpu->debugfs_root =
@@ -1462,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 
 	return 0;
 }
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-	struct dpu_plane *pdpu;
-
-	if (!plane)
-		return;
-	pdpu = to_dpu_plane(plane);
-
-	debugfs_remove_recursive(pdpu->debugfs_root);
-}
 #else
 static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 {
 	return 0;
 }
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
 #endif
 
 static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1490,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
 
 static void dpu_plane_early_unregister(struct drm_plane *plane)
 {
-	_dpu_plane_destroy_debugfs(plane);
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+	debugfs_remove_recursive(pdpu->debugfs_root);
 }
 
 static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1539,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
 	if (!pdpu) {
 		DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
 		ret = -ENOMEM;
-		goto exit;
+		return ERR_PTR(ret);
 	}
 
 	/* cache local stuff for later */
@@ -1625,6 +1542,5 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
 		dpu_hw_sspp_destroy(pdpu->pipe_hw);
 clean_plane:
 	kfree(pdpu);
-exit:
 	return ERR_PTR(ret);
 }
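
For illustration (not part of the patch), the helper-based debugfs pattern
the danger read/write hunks above switch to; the "example_" names are made up:

	static ssize_t example_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
	{
		char buf[16];
		int len = scnprintf(buf, sizeof(buf), "%d\n", 42);

		/* handles *ppos, short reads and copy_to_user() */
		return simple_read_from_buffer(ubuf, count, ppos, buf, len);
	}

	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *ppos)
	{
		unsigned int val;
		int ret = kstrtouint_from_user(ubuf, count, 0, &val);

		if (ret)
			return ret;	/* -EINVAL, -ERANGE or -EFAULT */
		/* ...act on val... */
		return count;
	}
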
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644
index fc14116..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
-	[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
-	[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
-	[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
-	if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
-		return data_bus_name[bus_id];
-
-	return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
-		u32 event_type)
-{
-	struct dpu_power_event *event;
-
-	list_for_each_entry(event, &phandle->event_list, list) {
-		if (event->event_type & event_type)
-			event->cb_fnc(event_type, event->usr);
-	}
-}
-
-struct dpu_power_client *dpu_power_client_create(
-	struct dpu_power_handle *phandle, char *client_name)
-{
-	struct dpu_power_client *client;
-	static u32 id;
-
-	if (!client_name || !phandle) {
-		pr_err("client name is null or invalid power data\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
-	if (!client)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&phandle->phandle_lock);
-	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
-	client->usecase_ndx = VOTE_INDEX_DISABLE;
-	client->id = id;
-	client->active = true;
-	pr_debug("client %s created:%pK id :%d\n", client_name,
-		client, id);
-	id++;
-	list_add(&client->list, &phandle->power_client_clist);
-	mutex_unlock(&phandle->phandle_lock);
-
-	return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
-	struct dpu_power_client *client)
-{
-	if (!client  || !phandle) {
-		pr_err("reg bus vote: invalid client handle\n");
-	} else if (!client->active) {
-		pr_err("dpu power deinit already done\n");
-		kfree(client);
-	} else {
-		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
-			client->name, client, client->id);
-		mutex_lock(&phandle->phandle_lock);
-		list_del_init(&client->list);
-		mutex_unlock(&phandle->phandle_lock);
-		kfree(client);
-	}
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
-	struct dpu_power_handle *phandle)
-{
-	phandle->dev = &pdev->dev;
-
-	INIT_LIST_HEAD(&phandle->power_client_clist);
-	INIT_LIST_HEAD(&phandle->event_list);
-
-	mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
-	struct dpu_power_handle *phandle)
-{
-	struct dpu_power_client *curr_client, *next_client;
-	struct dpu_power_event *curr_event, *next_event;
-
-	if (!phandle || !pdev) {
-		pr_err("invalid input param\n");
-		return;
-	}
-
-	mutex_lock(&phandle->phandle_lock);
-	list_for_each_entry_safe(curr_client, next_client,
-			&phandle->power_client_clist, list) {
-		pr_err("client:%s-%d still registered with refcount:%d\n",
-				curr_client->name, curr_client->id,
-				curr_client->refcount);
-		curr_client->active = false;
-		list_del(&curr_client->list);
-	}
-
-	list_for_each_entry_safe(curr_event, next_event,
-			&phandle->event_list, list) {
-		pr_err("event:%d, client:%s still registered\n",
-				curr_event->event_type,
-				curr_event->client_name);
-		curr_event->active = false;
-		list_del(&curr_event->list);
-	}
-	mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
-	struct dpu_power_client *pclient, bool enable)
-{
-	bool changed = false;
-	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
-	struct dpu_power_client *client;
-	u32 event_type;
-
-	if (!phandle || !pclient) {
-		pr_err("invalid input argument\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&phandle->phandle_lock);
-	if (enable)
-		pclient->refcount++;
-	else if (pclient->refcount)
-		pclient->refcount--;
-
-	if (pclient->refcount)
-		pclient->usecase_ndx = VOTE_INDEX_LOW;
-	else
-		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
-	list_for_each_entry(client, &phandle->power_client_clist, list) {
-		if (client->usecase_ndx < VOTE_INDEX_MAX &&
-		    client->usecase_ndx > max_usecase_ndx)
-			max_usecase_ndx = client->usecase_ndx;
-	}
-
-	if (phandle->current_usecase_ndx != max_usecase_ndx) {
-		changed = true;
-		prev_usecase_ndx = phandle->current_usecase_ndx;
-		phandle->current_usecase_ndx = max_usecase_ndx;
-	}
-
-	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
-		__builtin_return_address(0), changed, max_usecase_ndx,
-		pclient->name, pclient->id, enable, pclient->refcount);
-
-	if (!changed)
-		goto end;
-
-	event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
-
-	dpu_power_event_trigger_locked(phandle,	event_type);
-end:
-	mutex_unlock(&phandle->phandle_lock);
-	return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
-		struct dpu_power_handle *phandle,
-		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-		void *usr, char *client_name)
-{
-	struct dpu_power_event *event;
-
-	if (!phandle) {
-		pr_err("invalid power handle\n");
-		return ERR_PTR(-EINVAL);
-	} else if (!cb_fnc || !event_type) {
-		pr_err("no callback fnc or event type\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
-	if (!event)
-		return ERR_PTR(-ENOMEM);
-
-	event->event_type = event_type;
-	event->cb_fnc = cb_fnc;
-	event->usr = usr;
-	strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
-	event->active = true;
-
-	mutex_lock(&phandle->phandle_lock);
-	list_add(&event->list, &phandle->event_list);
-	mutex_unlock(&phandle->phandle_lock);
-
-	return event;
-}
-
-void dpu_power_handle_unregister_event(
-		struct dpu_power_handle *phandle,
-		struct dpu_power_event *event)
-{
-	if (!phandle || !event) {
-		pr_err("invalid phandle or event\n");
-	} else if (!event->active) {
-		pr_err("power handle deinit already done\n");
-		kfree(event);
-	} else {
-		mutex_lock(&phandle->phandle_lock);
-		list_del_init(&event->list);
-		mutex_unlock(&phandle->phandle_lock);
-		kfree(event);
-	}
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644
index a65b7a2..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
-
-#include "dpu_io_util.h"
-
-/* events will be triggered on power handler enable/disable */
-#define DPU_POWER_EVENT_DISABLE	BIT(0)
-#define DPU_POWER_EVENT_ENABLE	BIT(1)
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
-	VOTE_INDEX_DISABLE,
-	VOTE_INDEX_LOW,
-	VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
-	DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-	DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-	DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
-	DPU_POWER_HANDLE_DBUS_ID_MNOC,
-	DPU_POWER_HANDLE_DBUS_ID_LLCC,
-	DPU_POWER_HANDLE_DBUS_ID_EBI,
-	DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name:	name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount:	current refcount if multiple modules are using same
- *              same client for enable/disable. Power module will
- *              aggregate the refcount and vote accordingly for this
- *              client.
- * @id:		assigned during create. helps for debugging.
- * @list:	list to attach power handle master list
- * @ab:         arbitrated bandwidth for each bus client
- * @ib:         instantaneous bandwidth for each bus client
- * @active:	indicates the state of dpu power handle
- */
-struct dpu_power_client {
-	char name[MAX_CLIENT_NAME_LEN];
-	short usecase_ndx;
-	short refcount;
-	u32 id;
-	struct list_head list;
-	u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-	u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-	bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to DPU_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
-	char client_name[MAX_CLIENT_NAME_LEN];
-	void (*cb_fnc)(u32 event_type, void *usr);
-	void *usr;
-	u32 event_type;
-	struct list_head list;
-	bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
-	struct list_head power_client_clist;
-	struct mutex phandle_lock;
-	struct device *dev;
-	u32 current_usecase_ndx;
-	struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev:   platform device to search the power resources
- * @pdata:  power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
-	struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev:   platform device for power resources
- * @pdata:  power handle containing the resources
- *
- * Return: error code.
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
-	struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
-	char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
-	struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata:  power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
-	struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle:  power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for the data bus
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
-		struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- *	Clients can register for multiple events with a single register.
- *	Any block with access to phandle can register for the event
- *	notification.
- * @phandle:	power handle containing the resources
- * @event_type:	event type to register; refer DPU_POWER_HANDLE_EVENT_*
- * @cb_fnc:	pointer to desired callback function
- * @usr:	user pointer to pass to callback on event trigger
- *
- * Return:	event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
-		struct dpu_power_handle *phandle,
-		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-		void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle:	power handle containing the resources
- * @event:	event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
-		struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id:	data bus identifier
- * Return:	Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
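
For illustration (not part of the patch), what a former power-handle client
reduces to after this removal — the explicit enable/disable votes collapse
into runtime-PM references on the DPU platform device, as dpu_kms_hw_init()
does above:

	/* before: dpu_power_resource_enable(&dpu_kms->phandle, client, true);  */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);	/* clocks + power on */
	/* ...program hardware... */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);	/* drop the reference */
	/* before: dpu_power_resource_enable(&dpu_kms->phandle, client, false); */
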
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index e12c4ce..c78b521 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
 			__entry->vbif_idx)
 )
 
-TRACE_EVENT(dpu_perf_update_bus,
-	TP_PROTO(int client, unsigned long long ab_quota,
-	unsigned long long ib_quota),
-	TP_ARGS(client, ab_quota, ib_quota),
-	TP_STRUCT__entry(
-			__field(int, client)
-			__field(u64, ab_quota)
-			__field(u64, ib_quota)
-	),
-	TP_fast_assign(
-			__entry->client = client;
-			__entry->ab_quota = ab_quota;
-			__entry->ib_quota = ib_quota;
-	),
-	TP_printk("Request client:%d ab=%llu ib=%llu",
-			__entry->client,
-			__entry->ab_quota,
-			__entry->ib_quota)
-)
-
-
 TRACE_EVENT(dpu_cmd_release_bw,
 	TP_PROTO(u32 crtc_id),
 	TP_ARGS(crtc_id),
@@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
 	TP_PROTO(uint32_t drm_id),
 	TP_ARGS(drm_id)
 );
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
 
 TRACE_EVENT(dpu_enc_enable,
 	TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
 	TP_PROTO(uint32_t drm_id, u32 event),
 	TP_ARGS(drm_id, event)
 );
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
-	TP_PROTO(uint32_t drm_id, u32 event),
-	TP_ARGS(drm_id, event)
-);
 DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
 	TP_PROTO(uint32_t drm_id, u32 event),
 	TP_ARGS(drm_id, event)
@@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
 		__field(	uint32_t,		enc_id	)
 		__field(	bool,			enable	)
 		__field(	bool,			enabled )
-		__field(	bool,			suspend )
-		__field(	bool,			vblank_requested )
 	),
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
 		__entry->enc_id = enc_id;
 		__entry->enable = enable;
 		__entry->enabled = crtc->enabled;
-		__entry->suspend = crtc->suspend;
-		__entry->vblank_requested = crtc->vblank_requested;
 	),
-	TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
-		  "vblank_req:%s}",
+	TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
 		  __entry->drm_id, __entry->enc_id,
 		  __entry->enable ? "true" : "false",
-		  __entry->enabled ? "true" : "false",
-		  __entry->suspend ? "true" : "false",
-		  __entry->vblank_requested ? "true" : "false")
+		  __entry->enabled ? "true" : "false")
 );
 
 DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
 		__field(	uint32_t,		drm_id	)
 		__field(	bool,			enable	)
 		__field(	bool,			enabled )
-		__field(	bool,			suspend )
-		__field(	bool,			vblank_requested )
 	),
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
 		__entry->enable = enable;
 		__entry->enabled = crtc->enabled;
-		__entry->suspend = crtc->suspend;
-		__entry->vblank_requested = crtc->vblank_requested;
 	),
-	TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+	TP_printk("id:%u enable:%s state{enabled:%s}",
 		  __entry->drm_id, __entry->enable ? "true" : "false",
-		  __entry->enabled ? "true" : "false",
-		  __entry->suspend ? "true" : "false",
-		  __entry->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
-	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
-	TP_ARGS(drm_id, enable, crtc)
+		  __entry->enabled ? "true" : "false")
 );
 DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
 	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
 		  __entry->stop_req ? "true" : "false", __entry->clk_rate)
 );
 
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+	TP_PROTO(u32 new_bits, u32 pending_mask),
+	TP_ARGS(new_bits, pending_mask),
+	TP_STRUCT__entry(
+		__field(	u32,			new_bits	)
+		__field(	u32,			pending_mask	)
+	),
+	TP_fast_assign(
+		__entry->new_bits = new_bits;
+		__entry->pending_mask = pending_mask;
+	),
+	TP_printk("new=%x existing=%x", __entry->new_bits,
+		  __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+	TP_PROTO(u32 pending_mask, u32 ctl_flush),
+	TP_ARGS(pending_mask, ctl_flush),
+	TP_STRUCT__entry(
+		__field(	u32,			pending_mask	)
+		__field(	u32,			ctl_flush	)
+	),
+	TP_fast_assign(
+		__entry->pending_mask = pending_mask;
+		__entry->ctl_flush = ctl_flush;
+	),
+	TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+		  __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+	TP_PROTO(u32 pending_mask, u32 ctl_flush),
+	TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+	     dpu_hw_ctl_trigger_pending_flush,
+	TP_PROTO(u32 pending_mask, u32 ctl_flush),
+	TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+	TP_PROTO(u32 pending_mask, u32 ctl_flush),
+	TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+	TP_PROTO(u32 pending_mask, u32 ctl_flush),
+	TP_ARGS(pending_mask, ctl_flush)
+);
+
 #define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
 #define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
 #define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
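
For illustration (not part of the patch): each DEFINE_EVENT above generates a
trace_<name>() helper with its class's TP_PROTO signature, so the new CTL
events are emitted roughly like this (variable names are placeholders):

	trace_dpu_hw_ctl_update_pending_flush(new_bits, ctx_pending_mask);
	trace_dpu_hw_ctl_trigger_start(ctx_pending_mask, ctl_flush_reg);
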
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 2955282..ef753ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
 
 	if (ot_lim == 0)
-		goto exit;
+		return;
 
 	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
 		params->vbif_idx);
@@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 
 	if (forced_on)
 		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
-	return;
 }
 
 void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
 }
 
 #ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-	debugfs_remove_recursive(dpu_kms->debugfs_vbif);
-	dpu_kms->debugfs_vbif = NULL;
-}
 
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 {
 	char vbif_name[32];
-	struct dentry *debugfs_vbif;
+	struct dentry *entry, *debugfs_vbif;
 	int i, j;
 
-	dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
-	if (!dpu_kms->debugfs_vbif) {
-		DPU_ERROR("failed to create vbif debugfs\n");
-		return -EINVAL;
-	}
+	entry = debugfs_create_dir("vbif", debugfs_root);
+	if (IS_ERR_OR_NULL(entry))
+		return;
 
 	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
 		struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
 
 		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
 
-		debugfs_vbif = debugfs_create_dir(vbif_name,
-				dpu_kms->debugfs_vbif);
+		debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+		if (IS_ERR_OR_NULL(debugfs_vbif))
+			continue;
 
 		debugfs_create_u32("features", 0600, debugfs_vbif,
 			(u32 *)&vbif->features);
@@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 					(u32 *)&cfg->ot_limit);
 		}
 	}
-
-	return 0;
 }
 #endif
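
For illustration (not part of the patch), the "best effort" debugfs convention
the vbif hunks adopt — creation failures are tolerated rather than propagated,
so init helpers can return void:

	/* sketch only; names and the u32 being exposed are illustrative */
	static u32 example_value;

	static void example_debugfs_init(struct dentry *root)
	{
		struct dentry *dir = debugfs_create_dir("example", root);

		if (IS_ERR_OR_NULL(dir))
			return;		/* a debugging aid, not an error path */

		debugfs_create_u32("value", 0600, dir, &example_value);
	}
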
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index f17af52..6356876 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
  */
 void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
 
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
-		struct dentry *debugfs_root)
-{
-	return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
 #endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
index 4f12e5c..9fc9dbd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -813,18 +813,6 @@ enum color_fmts {
 #define COLOR_FMT_P010_UBWC		COLOR_FMT_P010_UBWC
 #define COLOR_FMT_P010		COLOR_FMT_P010
 
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
-	(void)height;
-	(void)width;
-
-	/*
-	 * In the future, calculate the size based on the w/h but just
-	 * hardcode it for now since 16K satisfies all current usecases.
-	 */
-	return 16 * 1024;
-}
-
 /*
  * Function arguments:
  * @color_fmt
@@ -832,114 +820,99 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
 {
-	unsigned int alignment, stride = 0;
+	unsigned int stride = 0;
 
 	if (!width)
-		goto invalid_input;
-
-	switch (color_fmt) {
-	case COLOR_FMT_NV21:
-	case COLOR_FMT_NV12:
-	case COLOR_FMT_NV12_MVTB:
-	case COLOR_FMT_NV12_UBWC:
-		alignment = 128;
-		stride = MSM_MEDIA_ALIGN(width, alignment);
-		break;
-	case COLOR_FMT_NV12_BPP10_UBWC:
-		alignment = 256;
-		stride = MSM_MEDIA_ALIGN(width, 192);
-		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
-		break;
-	case COLOR_FMT_P010_UBWC:
-		alignment = 256;
-		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
-		break;
-	case COLOR_FMT_P010:
-		alignment = 128;
-		stride = MSM_MEDIA_ALIGN(width*2, alignment);
-		break;
-	default:
-		break;
-	}
-invalid_input:
-	return stride;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- */
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
-{
-	unsigned int alignment, stride = 0;
-
-	if (!width)
-		goto invalid_input;
-
-	switch (color_fmt) {
-	case COLOR_FMT_NV21:
-	case COLOR_FMT_NV12:
-	case COLOR_FMT_NV12_MVTB:
-	case COLOR_FMT_NV12_UBWC:
-		alignment = 128;
-		stride = MSM_MEDIA_ALIGN(width, alignment);
-		break;
-	case COLOR_FMT_NV12_BPP10_UBWC:
-		alignment = 256;
-		stride = MSM_MEDIA_ALIGN(width, 192);
-		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
-		break;
-	case COLOR_FMT_P010_UBWC:
-		alignment = 256;
-		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
-		break;
-	case COLOR_FMT_P010:
-		alignment = 128;
-		stride = MSM_MEDIA_ALIGN(width*2, alignment);
-		break;
-	default:
-		break;
-	}
-invalid_input:
-	return stride;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @height
- * Progressive: height
- * Interlaced: (height+1)>>1
- */
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
-{
-	unsigned int alignment, sclines = 0;
-
-	if (!height)
-		goto invalid_input;
-
-	switch (color_fmt) {
-	case COLOR_FMT_NV21:
-	case COLOR_FMT_NV12:
-	case COLOR_FMT_NV12_MVTB:
-	case COLOR_FMT_NV12_UBWC:
-	case COLOR_FMT_P010:
-		alignment = 32;
-		break;
-	case COLOR_FMT_NV12_BPP10_UBWC:
-	case COLOR_FMT_P010_UBWC:
-		alignment = 16;
-		break;
-	default:
 		return 0;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		stride = MSM_MEDIA_ALIGN(width, 128);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		stride = MSM_MEDIA_ALIGN(width * 2, 256);
+		break;
+	case COLOR_FMT_P010:
+		stride = MSM_MEDIA_ALIGN(width * 2, 128);
+		break;
 	}
-	sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+	unsigned int stride = 0;
+
+	if (!width)
+		return 0;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		stride = MSM_MEDIA_ALIGN(width, 128);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		stride = MSM_MEDIA_ALIGN(width * 2, 256);
+		break;
+	case COLOR_FMT_P010:
+		stride = MSM_MEDIA_ALIGN(width * 2, 128);
+		break;
+	}
+
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+	unsigned int sclines = 0;
+
+	if (!height)
+		return 0;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010:
+		sclines = MSM_MEDIA_ALIGN(height, 32);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		sclines = MSM_MEDIA_ALIGN(height, 16);
+		break;
+	}
+
 	return sclines;
 }
 
@@ -950,12 +923,12 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
 {
-	unsigned int alignment, sclines = 0;
+	unsigned int sclines = 0;
 
 	if (!height)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
 	case COLOR_FMT_NV12_BPP10_UBWC:
 	case COLOR_FMT_P010_UBWC:
 	case COLOR_FMT_P010:
-		alignment = 16;
+		sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
 		break;
 	case COLOR_FMT_NV12_UBWC:
-		alignment = 32;
+		sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
 		break;
-	default:
-		goto invalid_input;
 	}
 
-	sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
 	return sclines;
 }
 
@@ -986,12 +954,12 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
 {
-	int y_tile_width = 0, y_meta_stride = 0;
+	int y_tile_width = 0, y_meta_stride;
 
 	if (!width)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
 		y_tile_width = 48;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
 	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
-	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
-	return y_meta_stride;
+	return MSM_MEDIA_ALIGN(y_meta_stride, 64);
 }
 
 /*
@@ -1019,12 +984,12 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
 {
-	int y_tile_height = 0, y_meta_scanlines = 0;
+	int y_tile_height = 0, y_meta_scanlines;
 
 	if (!height)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
 		y_tile_height = 4;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
 	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
-	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
-	return y_meta_scanlines;
+	return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
 }
 
 /*
@@ -1052,12 +1014,12 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
 {
-	int uv_tile_width = 0, uv_meta_stride = 0;
+	int uv_tile_width = 0, uv_meta_stride;
 
 	if (!width)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
 		uv_tile_width = 24;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
 	uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
-	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
-	return uv_meta_stride;
+	return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
 }
 
 /*
@@ -1085,12 +1044,12 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
 {
-	int uv_tile_height = 0, uv_meta_scanlines = 0;
+	int uv_tile_height = 0, uv_meta_scanlines;
 
 	if (!height)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
 		uv_tile_height = 4;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
 	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
-	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
-	return uv_meta_scanlines;
+	return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
 }
 
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
 {
-	unsigned int alignment = 0, stride = 0, bpp = 4;
+	unsigned int alignment = 0, bpp = 4;
 
 	if (!width)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
 		alignment = 256;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
-	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
-	return stride;
+	return MSM_MEDIA_ALIGN(width * bpp, alignment);
 }
 
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
 {
-	unsigned int alignment = 0, scanlines = 0;
+	unsigned int alignment = 0;
 
 	if (!height)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
 		alignment = 16;
 		break;
 	default:
-		goto invalid_input;
+		return 0;
 	}
 
-	scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
-	return scanlines;
+	return MSM_MEDIA_ALIGN(height, alignment);
 }
 
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
 {
-	int rgb_tile_width = 0, rgb_meta_stride = 0;
+	int rgb_meta_stride;
 
 	if (!width)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_RGBA8888_UBWC:
 	case COLOR_FMT_RGBA1010102_UBWC:
 	case COLOR_FMT_RGB565_UBWC:
-		rgb_tile_width = 16;
-		break;
-	default:
-		goto invalid_input;
+		rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+		return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
 	}
 
-	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
-	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
-	return rgb_meta_stride;
+	return 0;
 }
 
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
 {
-	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+	int rgb_meta_scanlines;
 
 	if (!height)
-		goto invalid_input;
+		return 0;
 
 	switch (color_fmt) {
 	case COLOR_FMT_RGBA8888_UBWC:
 	case COLOR_FMT_RGBA1010102_UBWC:
 	case COLOR_FMT_RGB565_UBWC:
-		rgb_tile_height = 4;
-		break;
-	default:
-		goto invalid_input;
+		rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+		return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
 	}
 
-	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
-	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
-	return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
-	int color_fmt, int width, int height)
-{
-	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
-	unsigned int uv_alignment = 0, size = 0;
-	unsigned int y_plane, uv_plane, y_stride,
-		uv_stride, y_sclines, uv_sclines;
-	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
-	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
-	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
-	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
-	unsigned int rgb_stride = 0, rgb_scanlines = 0;
-	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
-	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
-	if (!width || !height)
-		goto invalid_input;
-
-	y_stride = VENUS_Y_STRIDE(color_fmt, width);
-	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
-	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
-	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
-	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
-	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
-	switch (color_fmt) {
-	case COLOR_FMT_NV21:
-	case COLOR_FMT_NV12:
-	case COLOR_FMT_P010:
-		uv_alignment = 4096;
-		y_plane = y_stride * y_sclines;
-		uv_plane = uv_stride * uv_sclines + uv_alignment;
-		size = y_plane + uv_plane +
-				MSM_MEDIA_MAX(extra_size, 8 * y_stride);
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_NV12_MVTB:
-		uv_alignment = 4096;
-		y_plane = y_stride * y_sclines;
-		uv_plane = uv_stride * uv_sclines + uv_alignment;
-		size = y_plane + uv_plane;
-		size = 2 * size + extra_size;
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_NV12_UBWC:
-		y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
-		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-		uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
-		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-		y_meta_scanlines =
-			VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
-		y_meta_plane = MSM_MEDIA_ALIGN(
-			y_meta_stride * y_meta_scanlines, 4096);
-		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-		uv_meta_scanlines =
-			VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
-		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-			uv_meta_scanlines, 4096);
-
-		size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-			uv_meta_plane)*2 +
-			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_NV12_BPP10_UBWC:
-		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
-		y_meta_plane = MSM_MEDIA_ALIGN(
-				y_meta_stride * y_meta_scanlines, 4096);
-		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
-		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-					uv_meta_scanlines, 4096);
-
-		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-			uv_meta_plane +
-			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_P010_UBWC:
-		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
-		y_meta_plane = MSM_MEDIA_ALIGN(
-				y_meta_stride * y_meta_scanlines, 4096);
-		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
-		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-					uv_meta_scanlines, 4096);
-
-		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-			uv_meta_plane;
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_RGBA8888:
-		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride  * rgb_scanlines, 4096);
-		size = rgb_plane;
-		size =  MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	case COLOR_FMT_RGBA8888_UBWC:
-	case COLOR_FMT_RGBA1010102_UBWC:
-	case COLOR_FMT_RGB565_UBWC:
-		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
-							4096);
-		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
-		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
-					height);
-		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
-					rgb_meta_scanlines, 4096);
-		size = rgb_ubwc_plane + rgb_meta_plane;
-		size = MSM_MEDIA_ALIGN(size, 4096);
-		break;
-	default:
-		break;
-	}
-invalid_input:
-	return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
-	int color_fmt, int width, int height)
-{
-	unsigned int offset = 0;
-	unsigned int y_plane, uv_plane, y_stride,
-		uv_stride, y_sclines, uv_sclines;
-	if (!width || !height)
-		goto invalid_input;
-
-	y_stride = VENUS_Y_STRIDE(color_fmt, width);
-	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
-	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
-	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
-	switch (color_fmt) {
-	case COLOR_FMT_NV12_MVTB:
-		y_plane = y_stride * y_sclines;
-		uv_plane = uv_stride * uv_sclines;
-		offset = y_plane + uv_plane;
-		break;
-	default:
-		break;
-	}
-invalid_input:
-	return offset;
+	return 0;
 }
 
 #endif
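
For context, the deleted VENUS_BUFFER_SIZE() above boils down to: round each plane's stride and scanline count up to format-specific alignments, pad the chroma plane to 4 KiB, and round the whole buffer up to a page. Below is a minimal standalone sketch of that arithmetic, assuming power-of-two alignments; the MEDIA_* macros mirror the MSM_MEDIA_* helpers this header family defines, and the 128/32/16 constants are illustrative stand-ins for the real per-format table values:

/* Standalone sketch (not the kernel header) of the NV12 size math;
 * extradata padding from the real helper is omitted for brevity. */
#include <stdio.h>

#define MEDIA_ALIGN(sz, align)  (((sz) + (align) - 1) & ~((align) - 1))
#define MEDIA_ROUNDUP(sz, r)    (((sz) + (r) - 1) / (r))

int main(void)
{
	unsigned int width = 1280, height = 720;

	/* NV12: strides and scanlines rounded to illustrative alignments */
	unsigned int y_stride   = MEDIA_ALIGN(width, 128);
	unsigned int y_sclines  = MEDIA_ALIGN(height, 32);
	unsigned int uv_stride  = MEDIA_ALIGN(width, 128);
	unsigned int uv_sclines = MEDIA_ALIGN(MEDIA_ROUNDUP(height, 2), 16);

	/* UV plane padded by 4096, total rounded to a page, as above */
	unsigned int y_plane  = y_stride * y_sclines;
	unsigned int uv_plane = uv_stride * uv_sclines + 4096;
	unsigned int size     = MEDIA_ALIGN(y_plane + uv_plane, 4096);

	printf("NV12 %ux%u -> %u bytes\n", width, height, size);
	return 0;
}
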
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 457c29d..8f2359d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
 	struct msm_kms *kms = &mdp4_kms->base.base;
 
-	msm_gem_put_iova(val, kms->aspace);
+	msm_gem_unpin_iova(val, kms->aspace);
 	drm_gem_object_put_unlocked(val);
 }
 
@@ -384,7 +384,7 @@ static void update_cursor(struct drm_crtc *crtc)
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_get(next_bo);
-			msm_gem_get_iova(next_bo, kms->aspace, &iova);
+			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
 
 			/* enable cursor: */
 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 	int ret;
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 		return -EINVAL;
 	}
 
@@ -442,7 +442,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	if (cursor_bo) {
-		ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
 		if (ret)
 			goto fail;
 	} else {
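
The msm_gem_get_iova() to msm_gem_get_and_pin_iova() and msm_gem_put_iova() to msm_gem_unpin_iova() renames in these hunks make it explicit that scanout holds a pinned mapping. A kernel-style sketch of the cursor lifecycle the hunks implement, using only calls visible in this diff; error handling is omitted and this is not a standalone program:

/* Take an object reference plus a pinned iova while the buffer is
 * being scanned out; release both from the flip-work once it retires. */
static void cursor_scanout_begin(struct msm_kms *kms,
				 struct drm_gem_object *bo, uint64_t *iova)
{
	drm_gem_object_get(bo);                          /* object ref */
	msm_gem_get_and_pin_iova(bo, kms->aspace, iova); /* pin mapping */
}

static void cursor_scanout_end(struct msm_kms *kms,
			       struct drm_gem_object *bo)
{
	msm_gem_unpin_iova(bo, kms->aspace);             /* unpin mapping */
	drm_gem_object_put_unlocked(bo);                 /* drop object ref */
}
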
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587..a8fd14d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
 	struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
 
 	if (!dtv_pdata) {
-		dev_err(dev->dev, "could not find dtv pdata\n");
+		DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
 		return;
 	}
 
@@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
 
 	ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
 	if (ret)
-		dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
 			pc, ret);
 
 	ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
 
 	ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
 
 	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
 
@@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
 
 	mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
 	if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
-		dev_err(dev->dev, "failed to get hdmi_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
 		ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
 		goto fail;
 	}
 
 	mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
 	if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-		dev_err(dev->dev, "failed to get tv_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
 		ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
 		goto fail;
 	}
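
The dev_err()/dev_info()/dev_dbg() to DRM_DEV_ERROR()/DRM_DEV_INFO()/DRM_DEV_DEBUG() sweep running through the remaining msm hunks moves the driver onto drm_print.h's device-aware macros, which keep the device prefix while adding drm's own tagging; the debug variant honors the drm.debug module parameter like the plain DRM_DEBUG macros. A minimal usage sketch (kernel context, messages taken from this diff):

#include <linux/device.h>
#include <drm/drm_print.h>

/* Same call shapes as dev_err()/dev_info()/dev_dbg(), routed through
 * drm_print.h; 'dev' is the underlying struct device. */
static void log_examples(struct device *dev, int ret)
{
	DRM_DEV_ERROR(dev, "failed to enable hdmi_clk: %d\n", ret);
	DRM_DEV_INFO(dev, "no iommu, fallback to phys contig buffers\n");
	DRM_DEV_DEBUG(dev, "optional property not found, using default\n");
}
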
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 44d1cda..e437aa8 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
 	DBG("found MDP4 version v%d.%d", major, minor);
 
 	if (major != 4) {
-		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto out;
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	struct msm_gem_address_space *aspace = kms->aspace;
 
 	if (mdp4_kms->blank_cursor_iova)
-		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
 	drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
 
 	if (aspace) {
@@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
 	clk_disable_unprepare(mdp4_kms->clk);
 	if (mdp4_kms->pclk)
 		clk_disable_unprepare(mdp4_kms->pclk);
-	clk_disable_unprepare(mdp4_kms->lut_clk);
+	if (mdp4_kms->lut_clk)
+		clk_disable_unprepare(mdp4_kms->lut_clk);
 	if (mdp4_kms->axi_clk)
 		clk_disable_unprepare(mdp4_kms->axi_clk);
 
@@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
 	clk_prepare_enable(mdp4_kms->clk);
 	if (mdp4_kms->pclk)
 		clk_prepare_enable(mdp4_kms->pclk);
-	clk_prepare_enable(mdp4_kms->lut_clk);
+	if (mdp4_kms->lut_clk)
+		clk_prepare_enable(mdp4_kms->lut_clk);
 	if (mdp4_kms->axi_clk)
 		clk_prepare_enable(mdp4_kms->axi_clk);
 
@@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
 		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
 		if (IS_ERR(encoder)) {
-			dev_err(dev->dev, "failed to construct LCDC encoder\n");
+			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
 			return PTR_ERR(encoder);
 		}
 
@@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
 		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
 		if (IS_ERR(connector)) {
-			dev_err(dev->dev, "failed to initialize LVDS connector\n");
+			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
 			return PTR_ERR(connector);
 		}
 
@@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 	case DRM_MODE_ENCODER_TMDS:
 		encoder = mdp4_dtv_encoder_init(dev);
 		if (IS_ERR(encoder)) {
-			dev_err(dev->dev, "failed to construct DTV encoder\n");
+			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
 			return PTR_ERR(encoder);
 		}
 
@@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 			/* Construct bridge/connector for HDMI: */
 			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
 			if (ret) {
-				dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
 				return ret;
 			}
 		}
@@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 		encoder = mdp4_dsi_encoder_init(dev);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct DSI encoder: %d\n", ret);
 			return ret;
 		}
@@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
 		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
 		if (ret) {
-			dev_err(dev->dev, "failed to initialize DSI: %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
 				ret);
 			return ret;
 		}
 
 		break;
 	default:
-		dev_err(dev->dev, "Invalid or unsupported interface\n");
+		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
 		return -EINVAL;
 	}
 
@@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
 		plane = mdp4_plane_init(dev, vg_planes[i], false);
 		if (IS_ERR(plane)) {
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct plane for VG%d\n", i + 1);
 			ret = PTR_ERR(plane);
 			goto fail;
@@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
 		plane = mdp4_plane_init(dev, rgb_planes[i], true);
 		if (IS_ERR(plane)) {
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct plane for RGB%d\n", i + 1);
 			ret = PTR_ERR(plane);
 			goto fail;
@@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 		crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
 				mdp4_crtcs[i]);
 		if (IS_ERR(crtc)) {
-			dev_err(dev->dev, "failed to construct crtc for %s\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
 				mdp4_crtc_names[i]);
 			ret = PTR_ERR(crtc);
 			goto fail;
@@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
 		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
 		if (ret) {
-			dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
 				i, ret);
 			goto fail;
 		}
@@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 	if (!mdp4_kms) {
-		dev_err(dev->dev, "failed to allocate kms\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		ret = irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
@@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	if (mdp4_kms->vdd) {
 		ret = regulator_enable(mdp4_kms->vdd);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
 			goto fail;
 		}
 	}
 
 	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(mdp4_kms->clk)) {
-		dev_err(dev->dev, "failed to get core_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
 		ret = PTR_ERR(mdp4_kms->clk);
 		goto fail;
 	}
@@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	if (IS_ERR(mdp4_kms->pclk))
 		mdp4_kms->pclk = NULL;
 
-	// XXX if (rev >= MDP_REV_42) { ???
-	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
-	if (IS_ERR(mdp4_kms->lut_clk)) {
-		dev_err(dev->dev, "failed to get lut_clk\n");
-		ret = PTR_ERR(mdp4_kms->lut_clk);
-		goto fail;
+	if (mdp4_kms->rev >= 2) {
+		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+		if (IS_ERR(mdp4_kms->lut_clk)) {
+			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+			ret = PTR_ERR(mdp4_kms->lut_clk);
+			goto fail;
+		}
 	}
 
 	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(mdp4_kms->axi_clk)) {
-		dev_err(dev->dev, "failed to get axi_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
 		ret = PTR_ERR(mdp4_kms->axi_clk);
 		goto fail;
 	}
 
 	clk_set_rate(mdp4_kms->clk, config->max_clk);
-	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+	if (mdp4_kms->lut_clk)
+		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
 
 	pm_runtime_enable(dev->dev);
 	mdp4_kms->rpm_enabled = true;
@@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		if (ret)
 			goto fail;
 	} else {
-		dev_info(dev->dev, "no iommu, fallback to phys "
+		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
 		aspace = NULL;
 	}
 
 	ret = modeset_init(mdp4_kms);
 	if (ret) {
-		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
 		goto fail;
 	}
 
-	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
 	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
-		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
 		mdp4_kms->blank_cursor_bo = NULL;
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
 			&mdp4_kms->blank_cursor_iova);
 	if (ret) {
-		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 2bfb390..c9e3450 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
 	struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
 
 	if (!lcdc_pdata) {
-		dev_err(dev->dev, "could not find lvds pdata\n");
+		DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
 		return;
 	}
 
@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
 		break;
 
 	default:
-		dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+		DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
 		return;
 	}
 
@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
 				MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
 		break;
 	default:
-		dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+		DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
 		return;
 	}
 
@@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 	for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 		ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
 		if (ret)
-			dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
 	}
 
 	bs_set(mdp4_lcdc_encoder, 0);
@@ -377,20 +377,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 	unsigned long pc = mdp4_lcdc_encoder->pixclock;
 	struct mdp4_kms *mdp4_kms = get_kms(encoder);
 	struct drm_panel *panel;
+	uint32_t config;
 	int i, ret;
 
 	if (WARN_ON(mdp4_lcdc_encoder->enabled))
 		return;
 
 	/* TODO: hard-coded for 18bpp: */
-	mdp4_crtc_set_config(encoder->crtc,
-			MDP4_DMA_CONFIG_R_BPC(BPC6) |
-			MDP4_DMA_CONFIG_G_BPC(BPC6) |
-			MDP4_DMA_CONFIG_B_BPC(BPC6) |
-			MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
-			MDP4_DMA_CONFIG_PACK(0x21) |
-			MDP4_DMA_CONFIG_DEFLKR_EN |
-			MDP4_DMA_CONFIG_DITHER_EN);
+	config =
+		MDP4_DMA_CONFIG_R_BPC(BPC6) |
+		MDP4_DMA_CONFIG_G_BPC(BPC6) |
+		MDP4_DMA_CONFIG_B_BPC(BPC6) |
+		MDP4_DMA_CONFIG_PACK(0x21) |
+		MDP4_DMA_CONFIG_DEFLKR_EN |
+		MDP4_DMA_CONFIG_DITHER_EN;
+
+	if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+		config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+	mdp4_crtc_set_config(encoder->crtc, config);
 	mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
 
 	bs_set(mdp4_lcdc_encoder, 1);
@@ -398,16 +403,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 	for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 		ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
 		if (ret)
-			dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
 	}
 
 	DBG("setting lcdc_clk=%lu", pc);
 	ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
 	if (ret)
-		dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
 	ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
 	panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
 	if (!IS_ERR(panel)) {
@@ -461,7 +466,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 	/* TODO: do we need different pll in other cases? */
 	mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
 	if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
-		dev_err(dev->dev, "failed to get lvds_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
 		ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
 		goto fail;
 	}
@@ -470,7 +475,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 	reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +483,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 	reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +491,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
 	reg = devm_regulator_get(dev->dev, "lvds-vdda");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[2] = reg;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 79ff653..005066f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,6 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 
-	drm_plane_helper_disable(plane, NULL);
 	drm_plane_cleanup(plane);
 
 	kfree(mdp4_plane);
@@ -235,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
 	format = to_mdp_format(msm_framebuffer_format(fb));
 
 	if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
-		dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
-		dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (crtc_w > (src_w * UP_SCALE_MAX)) {
-		dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (crtc_h > (src_h * UP_SCALE_MAX)) {
-		dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d..ea8f7d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -553,6 +553,90 @@ const struct mdp5_cfg_hw msm8x96_config = {
 	.max_clk = 412500000,
 };
 
+const struct mdp5_cfg_hw msm8917_config = {
+	.name = "msm8917",
+	.mdp = {
+		.count = 1,
+		.caps = MDP_CAP_CDM,
+	},
+	.ctl = {
+		.count = 3,
+		.base = { 0x01000, 0x01200, 0x01400 },
+		.flush_hw_mask = 0xffffffff,
+	},
+	.pipe_vig = {
+		.count = 1,
+		.base = { 0x04000 },
+		.caps = MDP_PIPE_CAP_HFLIP	|
+			MDP_PIPE_CAP_VFLIP	|
+			MDP_PIPE_CAP_SCALE	|
+			MDP_PIPE_CAP_CSC	|
+			MDP_PIPE_CAP_DECIMATION	|
+			MDP_PIPE_CAP_SW_PIX_EXT	|
+			0,
+	},
+	.pipe_rgb = {
+		.count = 2,
+		.base = { 0x14000, 0x16000 },
+		.caps = MDP_PIPE_CAP_HFLIP	|
+			MDP_PIPE_CAP_VFLIP	|
+			MDP_PIPE_CAP_DECIMATION	|
+			MDP_PIPE_CAP_SW_PIX_EXT	|
+			0,
+	},
+	.pipe_dma = {
+		.count = 1,
+		.base = { 0x24000 },
+		.caps = MDP_PIPE_CAP_HFLIP	|
+			MDP_PIPE_CAP_VFLIP	|
+			MDP_PIPE_CAP_SW_PIX_EXT	|
+			0,
+	},
+	.pipe_cursor = {
+		.count = 1,
+		.base = { 0x34000 },
+		.caps = MDP_PIPE_CAP_HFLIP	|
+			MDP_PIPE_CAP_VFLIP	|
+			MDP_PIPE_CAP_SW_PIX_EXT	|
+			MDP_PIPE_CAP_CURSOR	|
+			0,
+	},
+
+	.lm = {
+		.count = 2,
+		.base = { 0x44000, 0x45000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 1, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB },
+			     },
+		.nb_stages = 8,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
+	},
+	.dspp = {
+		.count = 1,
+		.base = { 0x54000 },
+	},
+	.pp = {
+		.count = 1,
+		.base = { 0x70000 },
+	},
+	.cdm = {
+		.count = 1,
+		.base = { 0x79200 },
+	},
+	.intf = {
+		.base = { 0x6a000, 0x6a800 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+		},
+	},
+	.max_clk = 320000000,
+};
+
 static const struct mdp5_cfg_handler cfg_handlers[] = {
 	{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
 	{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
 	{ .revision = 6, .config = { .hw = &msm8x16_config } },
 	{ .revision = 9, .config = { .hw = &msm8x94_config } },
 	{ .revision = 7, .config = { .hw = &msm8x96_config } },
+	{ .revision = 15, .config = { .hw = &msm8917_config } },
 };
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 	}
 
 	if (major != 1) {
-		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto fail;
@@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 		break;
 	}
 	if (unlikely(!mdp5_cfg)) {
-		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto fail;
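
The msm8917 block added above feeds the cfg_handlers[] revision dispatch: the minor revision read from the hardware version register selects a static config, with revision 15 now mapping to msm8917. A standalone sketch of that lookup, using the revision/name pairs from this hunk and simplifying everything else:

#include <stdio.h>
#include <stddef.h>

struct cfg { int revision; const char *name; };

static const struct cfg cfg_handlers[] = {
	{ 0, "msm8x74v1" }, { 2, "msm8x74v2" }, { 6, "msm8x16" },
	{ 9, "msm8x94" },   { 7, "msm8x96" },   { 15, "msm8917" },
};

/* Returns the config name for a minor revision, NULL if unknown
 * (the driver fails with -ENXIO in that case). */
static const char *lookup(int minor)
{
	size_t i;

	for (i = 0; i < sizeof(cfg_handlers) / sizeof(cfg_handlers[0]); i++)
		if (cfg_handlers[i].revision == minor)
			return cfg_handlers[i].name;
	return NULL;
}

int main(void)
{
	printf("rev 15 -> %s\n", lookup(15));
	return 0;
}
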
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index d6f79dc..c1962f2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 	int pp_id = mixer->pp;
 
 	if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
-		dev_err(dev, "vsync_clk is not initialized\n");
+		DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
 		return -EINVAL;
 	}
 
 	total_lines_x100 = mode->vtotal * mode->vrefresh;
 	if (!total_lines_x100) {
-		dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+		DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
 				__func__, mode->vtotal, mode->vrefresh);
 		return -EINVAL;
 	}
 
 	vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
 	if (vsync_clk_speed <= 0) {
-		dev_err(dev, "vsync_clk round rate failed %ld\n",
+		DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
 							vsync_clk_speed);
 		return -EINVAL;
 	}
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 	ret = clk_set_rate(mdp5_kms->vsync_clk,
 		clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
 	if (ret) {
-		dev_err(encoder->dev->dev,
+		DRM_DEV_ERROR(encoder->dev->dev,
 			"vsync_clk clk_set_rate failed, %d\n", ret);
 		return ret;
 	}
 	ret = clk_prepare_enable(mdp5_kms->vsync_clk);
 	if (ret) {
-		dev_err(encoder->dev->dev,
+		DRM_DEV_ERROR(encoder->dev->dev,
 			"vsync_clk clk_prepare_enable failed, %d\n", ret);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b1da9ce..c5fde1a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 	struct msm_kms *kms = &mdp5_kms->base.base;
 
-	msm_gem_put_iova(val, kms->aspace);
+	msm_gem_unpin_iova(val, kms->aspace);
 	drm_gem_object_put_unlocked(val);
 }
 
@@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
 	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
 	if (ret) {
-		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
 		return ret;
 	}
 
@@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	 * and that we don't have conflicting mixer stages:
 	 */
 	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
-		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
 			cnt, start);
 		return -EINVAL;
 	}
@@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 		return -EINVAL;
 	}
 
@@ -903,7 +903,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!cursor_bo)
 		return -ENOENT;
 
-	ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
 			&mdp5_crtc->cursor.iova);
 	if (ret)
 		return -EINVAL;
@@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 set_cursor:
 	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 	if (ret) {
-		dev_err(dev->dev, "failed to %sable cursor: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
 				cursor_enable ? "en" : "dis", ret);
 		goto end;
 	}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index f93d568..65a871f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 	struct mdp5_hw_mixer *mixer = pipeline->mixer;
 
 	if (unlikely(WARN_ON(!mixer))) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
 			ctl->id);
 		return -EINVAL;
 	}
 
 	if (pipeline->r_mixer) {
-		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
 		return -EINVAL;
 	}
 
@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
 		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
 		return 0;
 	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
-		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
 		return -EINVAL;
 	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
-		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
 		return -EINVAL;
 	}
 
@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 		if ((ctl_mgr->ctls[c].status & checkm) == match)
 			goto found;
 
-	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
 	goto unlock;
 
 found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 
 	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
 	if (!ctl_mgr) {
-		dev_err(dev->dev, "failed to allocate CTL manager\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
-		dev_err(dev->dev, "Increase static pool size to at least %d\n",
+		DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
 				ctl_cfg->count);
 		ret = -ENOSPC;
 		goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 
 		if (WARN_ON(!ctl_cfg->base[c])) {
-			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+			DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
 			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index bddd625..d27e35a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 			minor->debugfs_root, minor);
 
 	if (ret) {
-		dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+		DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
 		return ret;
 	}
 
@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
 	encoder = mdp5_encoder_init(dev, intf, ctl);
 	if (IS_ERR(encoder)) {
-		dev_err(dev->dev, "failed to construct encoder\n");
+		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
 		return encoder;
 	}
 
@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
 		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
 
 		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
-			dev_err(dev->dev, "failed to find dsi from intf %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
 				intf->num);
 			ret = -EINVAL;
 			break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
 		break;
 	}
 	default:
-		dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
 		ret = -EINVAL;
 		break;
 	}
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		plane = mdp5_plane_init(dev, type);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
-			dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
 			goto fail;
 		}
 		priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		crtc  = mdp5_crtc_init(dev, primary[i], cursor[i], i);
 		if (IS_ERR(crtc)) {
 			ret = PTR_ERR(crtc);
-			dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
 			goto fail;
 		}
 		priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
 	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
 	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
 
-	dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
 }
 
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 	struct device *dev = &pdev->dev;
 	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
-		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
 	}
 	if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (irq < 0) {
 		ret = irq;
-		dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
-			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
 				ret);
 			goto fail;
 		}
 	} else {
-		dev_info(&pdev->dev,
+		DRM_DEV_INFO(&pdev->dev,
 			 "no iommu, fallback to phys contig buffers for scanout\n");
 		aspace = NULL;
 	}
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 
 	ret = modeset_init(mdp5_kms);
 	if (ret) {
-		dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
 		goto fail;
 	}
 
@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
 		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
 		if (IS_ERR(hwpipe)) {
 			ret = PTR_ERR(hwpipe);
-			dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
 					pipe2name(pipes[i]), ret);
 			return ret;
 		}
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
 		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
 		if (IS_ERR(mixer)) {
 			ret = PTR_ERR(mixer);
-			dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
 				i, ret);
 			return ret;
 		}
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
 
 		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 		if (!intf) {
-			dev_err(dev->dev, "failed to construct INTF%d\n", i);
+			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
 			return -ENOMEM;
 		}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 1cc4e57..889c294 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
 	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
 				  mdp5_mdss);
 	if (!d) {
-		dev_err(dev, "mdss irq domain add failed\n");
+		DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
 		return -ENXIO;
 	}
 
@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
 
 	ret = msm_mdss_get_clocks(mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
 		goto fail;
 	}
 
@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
 
 	ret = regulator_enable(mdp5_mdss->vdd);
 	if (ret) {
-		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
 			ret);
 		goto fail;
 	}
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
 	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
 			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to init irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
 		goto fail_irq;
 	}
 
 	ret = mdss_irq_domain_init(mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
 		goto fail_irq;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7f42c3e..be13140 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,6 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 
-	drm_plane_helper_disable(plane, NULL);
 	drm_plane_cleanup(plane);
 
 	kfree(mdp5_plane);
@@ -126,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
 
 	SET_PROPERTY(zpos, ZPOS, uint8_t);
 
-	dev_err(dev->dev, "Invalid property\n");
+	DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 	ret = -EINVAL;
 done:
 	return ret;
@@ -154,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
 
 	GET_PROPERTY(zpos, ZPOS, uint8_t);
 
-	dev_err(dev->dev, "Invalid property\n");
+	DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 	ret = -EINVAL;
 done:
 	return ret;
@@ -659,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
 
 	ret = calc_phase_step(src, dest, &phasex_step);
 	if (ret) {
-		dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
 	}
 
@@ -684,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
 
 	ret = calc_phase_step(src, dest, &phasey_step);
 	if (ret) {
-		dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 96c2b82..7cebcb2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
 
 	avail = cnt - bitmap_weight(state->state, cnt);
 	if (nblks > avail) {
-		dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+		DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
 				nblks, avail);
 		return -ENOSPC;
 	}
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
 		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
 		ret = smp_request_block(smp, state, cid, n);
 		if (ret) {
-			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+			DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
 					n, ret);
 			return ret;
 		}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index a9768f8..7b2a1e6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 
 	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "cannot find phy device\n");
+		DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
 		return -ENXIO;
 	}
 
@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 	of_node_put(phy_node);
 
 	if (!phy_pdev || !msm_dsi->phy) {
-		dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+		DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
 		return -EPROBE_DEFER;
 	}
 
@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 
 	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
 	if (ret) {
-		dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
 		goto fail;
 	}
 
@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 	if (IS_ERR(msm_dsi->bridge)) {
 		ret = PTR_ERR(msm_dsi->bridge);
-		dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
 		msm_dsi->bridge = NULL;
 		goto fail;
 	}
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 
 	if (IS_ERR(msm_dsi->connector)) {
 		ret = PTR_ERR(msm_dsi->connector);
-		dev_err(dev->dev,
+		DRM_DEV_ERROR(dev->dev,
 			"failed to create dsi connector: %d\n", ret);
 		msm_dsi->connector = NULL;
 		goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9c6c523..38e481d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 			msecs_to_jiffies(70));
 
 	if (ret <= 0)
-		dev_err(dev, "wait for video done timed out\n");
+		DRM_DEV_ERROR(dev, "wait for video done timed out\n");
 
 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
 		return PTR_ERR(data);
 	}
 
+	msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
 	msm_host->tx_size = msm_host->tx_gem_obj->size;
 
 	return 0;
@@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
 
 	priv = dev->dev_private;
 	if (msm_host->tx_gem_obj) {
-		msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+		msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
 		drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
 		msm_host->tx_gem_obj = NULL;
 	}
@@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
 	if (!dma_base)
 		return -EINVAL;
 
-	return msm_gem_get_iova(msm_host->tx_gem_obj,
+	return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
 				priv->kms->aspace, dma_base);
 }
 
@@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
 	prop = of_find_property(ep, "data-lanes", &len);
 	if (!prop) {
-		dev_dbg(dev,
+		DRM_DEV_DEBUG(dev,
 			"failed to find data lane mapping, using default\n");
 		return 0;
 	}
@@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 	num_lanes = len / sizeof(u32);
 
 	if (num_lanes < 1 || num_lanes > 4) {
-		dev_err(dev, "bad number of data lanes\n");
+		DRM_DEV_ERROR(dev, "bad number of data lanes\n");
 		return -EINVAL;
 	}
 
@@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
 					 num_lanes);
 	if (ret) {
-		dev_err(dev, "failed to read lane data\n");
+		DRM_DEV_ERROR(dev, "failed to read lane data\n");
 		return ret;
 	}
 
@@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 		 */
 		for (j = 0; j < num_lanes; j++) {
 			if (lane_map[j] < 0 || lane_map[j] > 3)
-				dev_err(dev, "bad physical lane entry %u\n",
+				DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
 					lane_map[j]);
 
 			if (swap[lane_map[j]] != j)
@@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	 */
 	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
 	if (!endpoint) {
-		dev_dbg(dev, "%s: no endpoint\n", __func__);
+		DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
 		return 0;
 	}
 
 	ret = dsi_host_parse_lane_data(msm_host, endpoint);
 	if (ret) {
-		dev_err(dev, "%s: invalid lane configuration %d\n",
+		DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
 			__func__, ret);
 		ret = -EINVAL;
 		goto err;
@@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	/* Get panel node from the output port's endpoint data */
 	device_node = of_graph_get_remote_node(np, 1, 0);
 	if (!device_node) {
-		dev_dbg(dev, "%s: no valid device\n", __func__);
+		DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
 		ret = -ENODEV;
 		goto err;
 	}
@@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
 					"syscon-sfpb");
 		if (IS_ERR(msm_host->sfpb)) {
-			dev_err(dev, "%s: failed to get sfpb regmap\n",
+			DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
 				__func__);
 			ret = PTR_ERR(msm_host->sfpb);
 		}
@@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (msm_host->irq < 0) {
 		ret = msm_host->irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 		return ret;
 	}
 
@@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 			"dsi_isr", msm_host);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+		DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
 				msm_host->irq, ret);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
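
dsi_host_parse_lane_data() in the hunks above reads the DT data-lanes property, rejects maps with more than four entries or physical indices outside 0..3, and detects whether the mapping is a pure lane swap. A standalone sketch of those checks; the sample array is hypothetical, and the involution test below is a simplification of the driver's swap detection:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical stand-in for data-lanes = <3 2 1 0>; the driver
	 * fills this via of_property_read_u32_array() */
	uint32_t lane_map[4] = { 3, 2, 1, 0 };
	int num_lanes = 4, j, swapped = 1;

	/* each entry maps logical lane j to a physical lane in 0..3 */
	for (j = 0; j < num_lanes; j++) {
		if (lane_map[j] > 3) {
			printf("bad physical lane entry %u\n", lane_map[j]);
			return 1;
		}
	}

	/* simplified swap check: the map must be its own inverse */
	for (j = 0; j < num_lanes; j++)
		if (lane_map[lane_map[j]] != (uint32_t)j)
			swapped = 0;

	printf("lane map %s a pure swap\n", swapped ? "is" : "is not");
	return 0;
}
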
index 9a9fa0c..1760483 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
 
 	ret = devm_regulator_bulk_get(dev, num, s);
 	if (ret < 0) {
-		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+		DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
 						__func__, ret);
 		return ret;
 	}
@@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
 			ret = regulator_set_load(s[i].consumer,
 							regs[i].enable_load);
 			if (ret < 0) {
-				dev_err(dev,
+				DRM_DEV_ERROR(dev,
 					"regulator %d set op mode failed, %d\n",
 					i, ret);
 				goto fail;
@@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
 
 	ret = regulator_bulk_enable(num, s);
 	if (ret < 0) {
-		dev_err(dev, "regulator enable failed, %d\n", ret);
+		DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
 		goto fail;
 	}
 
@@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
 
 	ret = clk_prepare_enable(phy->ahb_clk);
 	if (ret) {
-		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
 		pm_runtime_put_sync(dev);
 	}
 
@@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
 	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
 				"DSI_PHY_REG");
 	if (IS_ERR(phy->reg_base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
 			__func__);
 		ret = -ENOMEM;
 		goto fail;
@@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 	phy->id = dsi_phy_get_id(phy);
 	if (phy->id < 0) {
 		ret = phy->id;
-		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+		DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
 			__func__, ret);
 		goto fail;
 	}
@@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 
 	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
 	if (IS_ERR(phy->base)) {
-		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = dsi_phy_regulator_init(phy);
 	if (ret) {
-		dev_err(dev, "%s: failed to init regulator\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
 		goto fail;
 	}
 
 	phy->ahb_clk = msm_clk_get(pdev, "iface");
 	if (IS_ERR(phy->ahb_clk)) {
-		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
 		ret = PTR_ERR(phy->ahb_clk);
 		goto fail;
 	}
@@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 
 	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
 	if (IS_ERR_OR_NULL(phy->pll))
-		dev_info(dev,
+		DRM_DEV_INFO(dev,
 			"%s: pll init failed: %ld, need separate pll clk driver\n",
 			__func__, PTR_ERR(phy->pll));
 
@@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
 	ret = dsi_phy_enable_resource(phy);
 	if (ret) {
-		dev_err(dev, "%s: resource enable failed, %d\n",
+		DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
 			__func__, ret);
 		goto res_en_fail;
 	}
 
 	ret = dsi_phy_regulator_enable(phy);
 	if (ret) {
-		dev_err(dev, "%s: regulator enable failed, %d\n",
+		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
 			__func__, ret);
 		goto reg_en_fail;
 	}
 
 	ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
 	if (ret) {
-		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
 		goto phy_en_fail;
 	}
 
@@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
 		ret = msm_dsi_pll_restore_state(phy->pll);
 		if (ret) {
-			dev_err(dev, "%s: failed to restore pll state, %d\n",
+			DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
 				__func__, ret);
 			goto pll_restor_fail;
 		}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index b3fffc8..44959e79 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
@@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
 	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
 	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
 			__func__, ret);
 		return ret;
 	}
@@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
 	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
 				     "DSI_PHY_LANE");
 	if (IS_ERR(phy->lane_base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
 			__func__);
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 513f423..a172c66 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	void __iomem *lane_base = phy->lane_base;
 
 	if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
@@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
 	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
 	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
 			__func__, ret);
 		return ret;
 	}
@@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
 	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
 				"DSI_PHY_LANE");
 	if (IS_ERR(phy->lane_base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
 			__func__);
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69..9ea9478 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 4972b52..c79505d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 3980044..98790b4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 613e206..7a1fb4d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 	}
 
 	if (IS_ERR(pll)) {
-		dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
 		return pll;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 41bec57..aabab63 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
  *                              |                |
  *                              |                |
  *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
  *                 +---------+  |  +----------+  |  +----+
  *                              |                |
  *                              |                |         dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
  *                              |                |  +----+  |  |\  dsi0_pclk_mux
  *                              |                |--| /2 |--o--| \   |
  *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0pll
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
  *                              |------------------------------|  /     +---------+
  *                              |          +-----+             | /
  *                              -----------| /4? |--o----------|/
@@ -690,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 
 	hws[num++] = hw;
 
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
 	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
 
 	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -739,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 
 	hws[num++] = hw;
 
-	snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
 	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
 
 	/* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -762,7 +762,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
 				     pll_10nm->hw_data);
 	if (ret) {
-		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
 		return ret;
 	}
 
@@ -790,13 +790,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 
 	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
 	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
-		dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
 	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
-		dev_err(&pdev->dev, "failed to map PLL base\n");
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -815,7 +815,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 
 	ret = pll_10nm_register(pll_10nm);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
 		return ERR_PTR(ret);
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index 71fe60e..0e18cdd 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
 					 POLL_TIMEOUT_US);
 
 	if (unlikely(!locked))
-		dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
 	else
 		DBG("DSI PLL lock success");
 
@@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
 	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
 					cached_state->vco_rate, 0);
 	if (ret) {
-		dev_err(&pll_14nm->pdev->dev,
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev,
 			"restore vco rate failed. ret=%d\n", ret);
 		return ret;
 	}
@@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
 	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
 				     pll_14nm->hw_data);
 	if (ret) {
-		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
 		return ret;
 	}
 
@@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
 
 	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
 	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
-		dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
 	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
-		dev_err(&pdev->dev, "failed to map PLL base\n");
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
 
 	ret = pll_14nm_register(pll_14nm);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
 		return ERR_PTR(ret);
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 26e3a01..dcbbaeb 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 		if (rate <= lpfr_lut[i].vco_rate)
 			break;
 	if (i == LPFR_LUT_SIZE) {
-		dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
 				rate);
 		return -EINVAL;
 	}
@@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
 	}
 
 	if (unlikely(!locked))
-		dev_err(dev, "DSI PLL lock failed\n");
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
 	else
 		DBG("DSI PLL Lock success");
 
@@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
 	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
 
 	if (unlikely(!locked))
-		dev_err(dev, "DSI PLL lock failed\n");
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
 	else
 		DBG("DSI PLL lock success");
 
@@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
 	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
 					cached_state->vco_rate, 0);
 	if (ret) {
-		dev_err(&pll_28nm->pdev->dev,
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
 			"restore vco rate failed. ret=%d\n", ret);
 		return ret;
 	}
@@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	ret = of_clk_add_provider(dev->of_node,
 			of_clk_src_onecell_get, &pll_28nm->clk_data);
 	if (ret) {
-		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
 		return ret;
 	}
 
@@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 
 	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
 	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-		dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 		pll->en_seq_cnt = 1;
 		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
 	} else {
-		dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
 		return ERR_PTR(-EINVAL);
 	}
 
 	ret = pll_28nm_register(pll_28nm);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
 		return ERR_PTR(ret);
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 4900845..d689746 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
 	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
 
 	if (unlikely(!locked))
-		dev_err(dev, "DSI PLL lock failed\n");
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
 	else
 		DBG("DSI PLL lock success");
 
@@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
 	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
 					cached_state->vco_rate, 0);
 	if (ret) {
-		dev_err(&pll_28nm->pdev->dev,
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
 			"restore vco rate failed. ret=%d\n", ret);
 		return ret;
 	}
@@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	ret = of_clk_add_provider(dev->of_node,
 			of_clk_src_onecell_get, &pll_28nm->clk_data);
 	if (ret) {
-		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
 		return ret;
 	}
 
@@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
 
 	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
 	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-		dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
 
 	ret = pll_28nm_register(pll_28nm);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
 		return ERR_PTR(ret);
 	}
 
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..6a63aba 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 	edp->bridge = msm_edp_bridge_init(edp);
 	if (IS_ERR(edp->bridge)) {
 		ret = PTR_ERR(edp->bridge);
-		dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
 		edp->bridge = NULL;
 		goto fail;
 	}
@@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 	edp->connector = msm_edp_connector_init(edp);
 	if (IS_ERR(edp->connector)) {
 		ret = PTR_ERR(edp->connector);
-		dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
 		edp->connector = NULL;
 		goto fail;
 	}
@@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 	edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (edp->irq < 0) {
 		ret = edp->irq;
-		dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
 		goto fail;
 	}
 
@@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 			edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 			"edp_isr", edp);
 	if (ret < 0) {
-		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
 				edp->irq, ret);
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index adbdce3..e247d69 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
 
 	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "cannot find phy device\n");
+		DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
 		return -ENXIO;
 	}
 
@@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
 	of_node_put(phy_node);
 
 	if (!phy_pdev || !hdmi->phy) {
-		dev_err(&pdev->dev, "phy driver is not ready\n");
+		DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 	hdmi->qfprom_mmio = msm_ioremap(pdev,
 		config->qfprom_mmio_name, "HDMI_QFPROM");
 	if (IS_ERR(hdmi->qfprom_mmio)) {
-		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
 		hdmi->qfprom_mmio = NULL;
 	}
 
@@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 				config->hpd_reg_names[i]);
 		if (IS_ERR(reg)) {
 			ret = PTR_ERR(reg);
-			dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
 					config->hpd_reg_names[i], ret);
 			goto fail;
 		}
@@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 				config->pwr_reg_names[i]);
 		if (IS_ERR(reg)) {
 			ret = PTR_ERR(reg);
-			dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
 					config->pwr_reg_names[i], ret);
 			goto fail;
 		}
@@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 		clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
-			dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
 					config->hpd_clk_names[i], ret);
 			goto fail;
 		}
@@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 		clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
-			dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
 					config->pwr_clk_names[i], ret);
 			goto fail;
 		}
@@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
-		dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
 		hdmi->i2c = NULL;
 		goto fail;
 	}
 
 	ret = msm_hdmi_get_phy(hdmi);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to get phy\n");
+		DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
 		goto fail;
 	}
 
@@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 	hdmi->bridge = msm_hdmi_bridge_init(hdmi);
 	if (IS_ERR(hdmi->bridge)) {
 		ret = PTR_ERR(hdmi->bridge);
-		dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
 		hdmi->bridge = NULL;
 		goto fail;
 	}
@@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 	hdmi->connector = msm_hdmi_connector_init(hdmi);
 	if (IS_ERR(hdmi->connector)) {
 		ret = PTR_ERR(hdmi->connector);
-		dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
 		hdmi->connector = NULL;
 		goto fail;
 	}
@@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (hdmi->irq < 0) {
 		ret = hdmi->irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
@@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 			msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 			"hdmi_isr", hdmi);
 	if (ret < 0) {
-		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
 				hdmi->irq, ret);
 		goto fail;
 	}
@@ -482,7 +482,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
 	unsigned int level_shift  = 0; /* 0dB */
 	bool down_mix = false;
 
-	dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+	DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
 		 params->sample_width, params->cea.channels);
 
 	switch (params->cea.channels) {
@@ -533,7 +533,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
 		rate = HDMI_SAMPLE_RATE_192KHZ;
 		break;
 	default:
-		dev_err(dev, "rate[%d] not supported!\n",
+		DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
 			params->sample_rate);
 		return -EINVAL;
 	}
@@ -585,7 +585,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 	hdmi_cfg = (struct hdmi_platform_config *)
 			of_device_get_match_data(dev);
 	if (!hdmi_cfg) {
-		dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
+		DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
 		return -ENXIO;
 	}
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7e35707..98d61c6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
 	for (i = 0; i < config->pwr_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->pwr_regs[i]);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
 					config->pwr_reg_names[i], ret);
 		}
 	}
@@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
 		DBG("pixclock: %lu", hdmi->pixclock);
 		ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
 		if (ret) {
-			dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
 					config->pwr_clk_names[0], ret);
 		}
 	}
@@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
 	for (i = 0; i < config->pwr_clk_cnt; i++) {
 		ret = clk_prepare_enable(hdmi->pwr_clks[i]);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
 					config->pwr_clk_names[i], ret);
 		}
 	}
@@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
 	for (i = 0; i < config->pwr_reg_cnt; i++) {
 		ret = regulator_disable(hdmi->pwr_regs[i]);
 		if (ret) {
-			dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
 					config->pwr_reg_names[i], ret);
 		}
 	}
@@ -105,7 +105,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
 
 	len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
 	if (len < 0) {
-		dev_err(&hdmi->pdev->dev,
+		DRM_DEV_ERROR(&hdmi->pdev->dev,
 			"failed to configure avi infoframe\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 30e908d..a6eeab2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 			if (gpio.num != -1) {
 				ret = gpio_request(gpio.num, gpio.label);
 				if (ret) {
-					dev_err(dev,
+					DRM_DEV_ERROR(dev,
 						"'%s'(%d) gpio_request failed: %d\n",
 						gpio.label, gpio.num, ret);
 					goto err;
@@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
 
 			ret = clk_prepare_enable(hdmi->hpd_clks[i]);
 			if (ret) {
-				dev_err(dev,
+				DRM_DEV_ERROR(dev,
 					"failed to enable hpd clk: %s (%d)\n",
 					config->hpd_clk_names[i], ret);
 			}
@@ -180,7 +180,7 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector)
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->hpd_regs[i]);
 		if (ret) {
-			dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+			DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
 					config->hpd_reg_names[i], ret);
 			goto fail;
 		}
@@ -188,13 +188,13 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector)
 
 	ret = pinctrl_pm_select_default_state(dev);
 	if (ret) {
-		dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+		DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
 		goto fail;
 	}
 
 	ret = gpio_config(hdmi, true);
 	if (ret) {
-		dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+		DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 73e2021..25d2fe2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
 	} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
 
 	if (!retry) {
-		dev_err(dev->dev, "timeout waiting for DDC\n");
+		DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
 		return -ETIMEDOUT;
 	}
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 4157722..1f4331e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
 		reg = devm_regulator_get(dev, cfg->reg_names[i]);
 		if (IS_ERR(reg)) {
 			ret = PTR_ERR(reg);
-			dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+			DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
 				cfg->reg_names[i], ret);
 			return ret;
 		}
@@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
 		clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
-			dev_err(dev, "failed to get phy clock: %s (%d)\n",
+			DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
 				cfg->clk_names[i], ret);
 			return ret;
 		}
@@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
 	for (i = 0; i < cfg->num_regs; i++) {
 		ret = regulator_enable(phy->regs[i]);
 		if (ret)
-			dev_err(dev, "failed to enable regulator: %s (%d)\n",
+			DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
 				cfg->reg_names[i], ret);
 	}
 
 	for (i = 0; i < cfg->num_clks; i++) {
 		ret = clk_prepare_enable(phy->clks[i]);
 		if (ret)
-			dev_err(dev, "failed to enable clock: %s (%d)\n",
+			DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
 				cfg->clk_names[i], ret);
 	}
 
@@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
 
 	phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
 	if (IS_ERR(phy->mmio)) {
-		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
 
 	ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
 	if (ret) {
-		dev_err(dev, "couldn't init PLL\n");
+		DRM_DEV_ERROR(dev, "couldn't init PLL\n");
 		msm_hdmi_phy_resource_disable(phy);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 0df504c..318708f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
 	pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
 	if (IS_ERR(pll->mmio_qserdes_com)) {
-		dev_err(dev, "failed to map pll base\n");
+		DRM_DEV_ERROR(dev, "failed to map pll base\n");
 		return -ENOMEM;
 	}
 
@@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
 		pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
 		if (IS_ERR(pll->mmio_qserdes_tx[i])) {
-			dev_err(dev, "failed to map pll base\n");
+			DRM_DEV_ERROR(dev, "failed to map pll base\n");
 			return -ENOMEM;
 		}
 	}
@@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
 	clk = devm_clk_register(dev, &pll->clk_hw);
 	if (IS_ERR(clk)) {
-		dev_err(dev, "failed to register pll clock\n");
+		DRM_DEV_ERROR(dev, "failed to register pll clock\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 9959075..c6dae6e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 
 	pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
 	if (IS_ERR(pll->mmio)) {
-		dev_err(dev, "failed to map pll base\n");
+		DRM_DEV_ERROR(dev, "failed to map pll base\n");
 		return -ENOMEM;
 	}
 
@@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 
 	clk = devm_clk_register(dev, &pll->clk_hw);
 	if (IS_ERR(clk)) {
-		dev_err(dev, "failed to register pll clock\n");
+		DRM_DEV_ERROR(dev, "failed to register pll clock\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 2088a20..f5b1256 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -83,7 +83,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 		kms->funcs->commit(kms, state);
 	}
 
-	msm_atomic_wait_for_commit_done(dev, state);
+	if (!state->legacy_cursor_update)
+		msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
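
The commit-done wait is now skipped for legacy cursor updates: the legacy cursor ioctl is expected to be non-blocking, and stalling every cursor move on vblank made pointer motion visibly janky. The same idea in terms of the generic helpers, as a sketch:

	/* generic form of the same pattern in a commit_tail implementation */
	drm_atomic_helper_commit_planes(dev, state, 0);
	if (!state->legacy_cursor_update)
		drm_atomic_helper_wait_for_flip_done(dev, state);
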
 
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index d756436..fb423d3 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -201,13 +201,13 @@ static int late_init_minor(struct drm_minor *minor)
 
 	ret = msm_rd_debugfs_init(minor);
 	if (ret) {
-		dev_err(minor->dev->dev, "could not install rd debugfs\n");
+		DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
 		return ret;
 	}
 
 	ret = msm_perf_debugfs_init(minor);
 	if (ret) {
-		dev_err(minor->dev->dev, "could not install perf debugfs\n");
+		DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
 		return ret;
 	}
 
@@ -235,14 +235,14 @@ int msm_debugfs_init(struct drm_minor *minor)
 			minor->debugfs_root, minor);
 
 	if (ret) {
-		dev_err(dev->dev, "could not install msm_debugfs_list\n");
+		DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
 		return ret;
 	}
 
 	debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
 		dev, &msm_gpu_fops);
 
-	if (priv->kms->funcs->debugfs_init) {
+	if (priv->kms && priv->kms->funcs->debugfs_init) {
 		ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
 		if (ret)
 			return ret;
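
priv->kms can now legitimately be NULL (headless, GPU-only operation), so the debugfs hook is only called when a KMS implementation is actually bound. The same guard shape applies at any optional-KMS call site, as a sketch:

	/* sketch: guard any optional-KMS callback the same way */
	if (priv->kms && priv->kms->funcs->irq_preinstall)
		priv->kms->funcs->irq_preinstall(priv->kms);
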
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index dcff812..d2cdc7b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,8 +23,10 @@
 #include "msm_drv.h"
 #include "msm_debugfs.h"
 #include "msm_fence.h"
+#include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
+#include "adreno/adreno_gpu.h"
 
 
 /*
@@ -35,9 +37,11 @@
  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
  *           MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get a
+ *           GEM object's debug name
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	3
+#define MSM_VERSION_MINOR	4
 #define MSM_VERSION_PATCHLEVEL	0
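
Bumping the UABI minor to 1.4 lets userspace probe for softpin, MSM_RELOC_BO_DUMP, and the GEM debug-name support before relying on them. A hypothetical libdrm-side feature check (drmGetVersion()/drmFreeVersion() are standard libdrm; the feature test itself is an assumption):

	drmVersionPtr v = drmGetVersion(fd);
	bool have_gem_name = v && (v->version_major > 1 ||
		(v->version_major == 1 && v->version_minor >= 4));
	drmFreeVersion(v);
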
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -170,7 +174,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -178,7 +182,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 
 	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
 	if (!ptr) {
-		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -312,6 +316,7 @@ static int msm_drm_uninit(struct device *dev)
 	if (fbdev && priv->fbdev)
 		msm_fbdev_free(ddev);
 #endif
+	drm_atomic_helper_shutdown(ddev);
 	drm_mode_config_cleanup(ddev);
 
 	pm_runtime_get_sync(dev);
@@ -357,6 +362,14 @@ static int get_mdp_ver(struct platform_device *pdev)
 
 #include <linux/of_address.h>
 
+bool msm_use_mmu(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	/* a2xx comes with its own MMU */
+	return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
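
msm_use_mmu() replaces the bare iommu_present(&platform_bus_type) checks: the a2xx GPU carries its own gpummu, so GPU-side translation works even on platforms with no system IOMMU, and only when neither exists should the driver fall back to a carveout. Condensed caller sketch (the real call sites are in the msm_init_vram() and _msm_gem_new() hunks below):

	/* fall back to the VRAM carveout only with no MMU of any kind */
	if (!msm_use_mmu(dev))
		use_vram = true;
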
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -395,7 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
 		 * Grab the entire CMA chunk carved out in early startup in
 		 * mach-msm:
 		 */
-	} else if (!iommu_present(&platform_bus_type)) {
+	} else if (!msm_use_mmu(dev)) {
 		DRM_INFO("using %s VRAM carveout\n", vram);
 		size = memparse(vram, NULL);
 	}
@@ -418,12 +431,12 @@ static int msm_init_vram(struct drm_device *dev)
 		p = dma_alloc_attrs(dev->dev, size,
 				&priv->vram.paddr, GFP_KERNEL, attrs);
 		if (!p) {
-			dev_err(dev->dev, "failed to allocate VRAM\n");
+			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
 			priv->vram.paddr = 0;
 			return -ENOMEM;
 		}
 
-		dev_info(dev->dev, "VRAM: %08x->%08x\n",
+		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
 				(uint32_t)priv->vram.paddr,
 				(uint32_t)(priv->vram.paddr + size));
 	}
@@ -443,7 +456,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	ddev = drm_dev_alloc(drv, dev);
 	if (IS_ERR(ddev)) {
-		dev_err(dev, "failed to allocate drm_device\n");
+		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
 		return PTR_ERR(ddev);
 	}
 
@@ -507,19 +520,16 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 		priv->kms = kms;
 		break;
 	default:
-		kms = ERR_PTR(-ENODEV);
+		/* valid only for the dummy headless case, where of_node=NULL */
+		WARN_ON(dev->of_node);
+		kms = NULL;
 		break;
 	}
 
 	if (IS_ERR(kms)) {
-		/*
-		 * NOTE: once we have GPU support, having no kms should not
-		 * be considered fatal.. ideally we would still support gpu
-		 * and (for example) use dmabuf/prime to share buffers with
-		 * imx drm driver on iMX5
-		 */
-		dev_err(dev, "failed to load kms\n");
+		DRM_DEV_ERROR(dev, "failed to load kms\n");
 		ret = PTR_ERR(kms);
+		priv->kms = NULL;
 		goto err_msm_uninit;
 	}
 
@@ -529,7 +539,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	if (kms) {
 		ret = kms->funcs->hw_init(kms);
 		if (ret) {
-			dev_err(dev, "kms hw init failed: %d\n", ret);
+			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
 			goto err_msm_uninit;
 		}
 	}
@@ -554,7 +564,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 				&priv->disp_thread[i].worker,
 				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
 		if (IS_ERR(priv->disp_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_commit kthread\n");
+			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
 			priv->disp_thread[i].thread = NULL;
 			goto err_msm_uninit;
 		}
@@ -574,7 +584,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 				&priv->event_thread[i].worker,
 				"crtc_event:%d", priv->event_thread[i].crtc_id);
 		if (IS_ERR(priv->event_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_event kthread\n");
+			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
 			priv->event_thread[i].thread = NULL;
 			goto err_msm_uninit;
 		}
@@ -595,7 +605,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
-		dev_err(dev, "failed to initialize vblank\n");
+		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
 		goto err_msm_uninit;
 	}
 
@@ -604,7 +614,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 		ret = drm_irq_install(ddev, kms->irq);
 		pm_runtime_put_sync(dev);
 		if (ret < 0) {
-			dev_err(dev, "failed to install IRQ handler\n");
+			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
 			goto err_msm_uninit;
 		}
 	}
@@ -616,7 +626,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (fbdev)
+	if (kms && fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
 #endif
 
@@ -724,7 +734,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	BUG_ON(!kms);
-	return kms->funcs->irq_postinstall(kms);
+
+	if (kms->funcs->irq_postinstall)
+		return kms->funcs->irq_postinstall(kms);
+
+	return 0;
 }
 
 static void msm_irq_uninstall(struct drm_device *dev)
@@ -791,7 +805,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
 	}
 
 	return msm_gem_new_handle(dev, file, args->size,
-			args->flags, &args->handle);
+			args->flags, &args->handle, NULL);
 }
 
 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -849,6 +863,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
+	/*
+	 * Don't pin the memory here - just get an address so that userspace can
+	 * be productive
+	 */
 	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 }
 
@@ -857,23 +875,66 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 {
 	struct drm_msm_gem_info *args = data;
 	struct drm_gem_object *obj;
-	int ret = 0;
+	struct msm_gem_object *msm_obj;
+	int i, ret = 0;
 
-	if (args->flags & ~MSM_INFO_FLAGS)
+	if (args->pad)
 		return -EINVAL;
 
+	switch (args->info) {
+	case MSM_INFO_GET_OFFSET:
+	case MSM_INFO_GET_IOVA:
+		/* value returned as immediate, not pointer, so len==0: */
+		if (args->len)
+			return -EINVAL;
+		break;
+	case MSM_INFO_SET_NAME:
+	case MSM_INFO_GET_NAME:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
-	if (args->flags & MSM_INFO_IOVA) {
-		uint64_t iova;
+	msm_obj = to_msm_bo(obj);
 
-		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
-		if (!ret)
-			args->offset = iova;
-	} else {
-		args->offset = msm_gem_mmap_offset(obj);
+	switch (args->info) {
+	case MSM_INFO_GET_OFFSET:
+		args->value = msm_gem_mmap_offset(obj);
+		break;
+	case MSM_INFO_GET_IOVA:
+		ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+		break;
+	case MSM_INFO_SET_NAME:
+		/* length check should leave room for terminating null: */
+		if (args->len >= sizeof(msm_obj->name)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = copy_from_user(msm_obj->name,
+			u64_to_user_ptr(args->value), args->len);
+		msm_obj->name[args->len] = '\0';
+		for (i = 0; i < args->len; i++) {
+			if (!isprint(msm_obj->name[i])) {
+				msm_obj->name[i] = '\0';
+				break;
+			}
+		}
+		break;
+	case MSM_INFO_GET_NAME:
+		if (args->value && (args->len < strlen(msm_obj->name))) {
+			ret = -EINVAL;
+			break;
+		}
+		args->len = strlen(msm_obj->name);
+		if (args->value) {
+			ret = copy_to_user(u64_to_user_ptr(args->value),
+					msm_obj->name, args->len);
+		}
+		break;
 	}
 
 	drm_gem_object_put_unlocked(obj);
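
GEM_INFO is reworked from a flags-based query into a get/set interface selected by args->info, which is what allows attaching a debug name to a BO (and later gives softpin a natural home). A hypothetical userspace sketch using the new interface (the ioctl constants and struct layout are the ones added here; drmCommandWriteRead() is standard libdrm):

	struct drm_msm_gem_info req = {
		.handle = bo_handle,
		.info   = MSM_INFO_SET_NAME,
		.value  = (uintptr_t)"scanout",
		.len    = strlen("scanout"),
	};

	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
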
@@ -1052,18 +1113,15 @@ static int msm_pm_suspend(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
-	struct msm_kms *kms = priv->kms;
 
-	/* TODO: Use atomic helper suspend/resume */
-	if (kms && kms->funcs && kms->funcs->pm_suspend)
-		return kms->funcs->pm_suspend(dev);
-
-	drm_kms_helper_poll_disable(ddev);
+	if (WARN_ON(priv->pm_state))
+		drm_atomic_state_put(priv->pm_state);
 
 	priv->pm_state = drm_atomic_helper_suspend(ddev);
 	if (IS_ERR(priv->pm_state)) {
-		drm_kms_helper_poll_enable(ddev);
-		return PTR_ERR(priv->pm_state);
+		int ret = PTR_ERR(priv->pm_state);
+		DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+		return ret;
 	}
 
 	return 0;
@@ -1073,16 +1131,16 @@ static int msm_pm_resume(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct msm_drm_private *priv = ddev->dev_private;
-	struct msm_kms *kms = priv->kms;
+	int ret;
 
-	/* TODO: Use atomic helper suspend/resume */
-	if (kms && kms->funcs && kms->funcs->pm_resume)
-		return kms->funcs->pm_resume(dev);
+	if (WARN_ON(!priv->pm_state))
+		return -ENOENT;
 
-	drm_atomic_helper_resume(ddev, priv->pm_state);
-	drm_kms_helper_poll_enable(ddev);
+	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+	if (!ret)
+		priv->pm_state = NULL;
 
-	return 0;
+	return ret;
 }
 #endif
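
msm_pm_suspend()/msm_pm_resume() drop the per-KMS pm hooks and the manual poll toggling in favour of the generic atomic helpers, which disable all outputs and stash the atomic state on suspend, then re-commit it on resume. The helper contract, roughly, as a sketch:

	/* rough contract of the helpers used above */
	static int example_suspend(struct drm_device *ddev,
				   struct drm_atomic_state **saved)
	{
		*saved = drm_atomic_helper_suspend(ddev); /* disables CRTCs */
		return IS_ERR(*saved) ? PTR_ERR(*saved) : 0;
	}

	static int example_resume(struct drm_device *ddev,
				  struct drm_atomic_state *saved)
	{
		/* re-commits the saved state and releases it on success */
		return drm_atomic_helper_resume(ddev, saved);
	}
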
 
@@ -1167,7 +1225,7 @@ static int add_components_mdp(struct device *mdp_dev,
 
 		ret = of_graph_parse_endpoint(ep_node, &ep);
 		if (ret) {
-			dev_err(mdp_dev, "unable to parse port endpoint\n");
+			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
 			of_node_put(ep_node);
 			return ret;
 		}
@@ -1189,8 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev,
 		if (!intf)
 			continue;
 
-		drm_of_component_match_add(master_dev, matchptr, compare_of,
-					   intf);
+		if (of_device_is_available(intf))
+			drm_of_component_match_add(master_dev, matchptr,
+						   compare_of, intf);
+
 		of_node_put(intf);
 	}
 
@@ -1218,13 +1278,13 @@ static int add_display_components(struct device *dev,
 	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
-			dev_err(dev, "failed to populate children devices\n");
+			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
 			return ret;
 		}
 
 		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
 		if (!mdp_dev) {
-			dev_err(dev, "failed to find MDSS MDP node\n");
+			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
 			of_platform_depopulate(dev);
 			return -ENODEV;
 		}
@@ -1254,6 +1314,7 @@ static int add_display_components(struct device *dev,
 static const struct of_device_id msm_gpu_match[] = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
+	{ .compatible = "amd,imageon" },
 	{ .compatible = "qcom,kgsl-3d0" },
 	{ },
 };
@@ -1298,9 +1359,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
 	struct component_match *match = NULL;
 	int ret;
 
-	ret = add_display_components(&pdev->dev, &match);
-	if (ret)
-		return ret;
+	if (get_mdp_ver(pdev)) {
+		ret = add_display_components(&pdev->dev, &match);
+		if (ret)
+			return ret;
+	}
 
 	ret = add_gpu_components(&pdev->dev, &match);
 	if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f32..9cd6a96 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -179,6 +179,8 @@ struct msm_drm_private {
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
 	struct msm_file_private *lastctx;
+	/* gpu is only set on open(), but we need this info earlier */
+	bool is_a2xx;
 
 	struct drm_fb_helper *fbdev;
 
@@ -241,10 +243,16 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt);
+		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);
 
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
@@ -252,9 +260,15 @@ struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
 
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+		const char *name, uint64_t va_start, uint64_t va_end);
+
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
+bool msm_use_mmu(struct drm_device *dev);
+
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
@@ -269,12 +283,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -301,7 +317,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle);
+		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,9 +328,13 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 		uint32_t flags, struct msm_gem_address_space *aspace,
 		struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+		struct msm_gem_address_space *aspace, bool locked);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348a..67dfd8d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -66,7 +66,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 	uint64_t iova;
 
 	for (i = 0; i < n; i++) {
-		ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+		ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
 		if (ret)
 			return ret;
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 	int i, n = fb->format->num_planes;
 
 	for (i = 0; i < n; i++)
-		msm_gem_put_iova(fb->obj[i], aspace);
+		msm_gem_unpin_iova(fb->obj[i], aspace);
 }
 
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
 			mode_cmd->modifier[0]);
 	if (!format) {
-		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+		DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
 				(char *)&mode_cmd->pixel_format);
 		ret = -EINVAL;
 		goto fail;
@@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
 	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
 	if (ret) {
-		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
 		goto fail;
 	}
 
@@ -233,13 +233,15 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
 		bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
 	}
 	if (IS_ERR(bo)) {
-		dev_err(dev->dev, "failed to allocate buffer object\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
 		return ERR_CAST(bo);
 	}
 
+	msm_gem_object_set_name(bo, "stolenfb");
+
 	fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
 	if (IS_ERR(fb)) {
-		dev_err(dev->dev, "failed to allocate fb\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
 		/* note: if fb creation failed, we can't rely on fb destroy
 		 * to unref the bo:
 		 */
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b..c03e860 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 			sizes->surface_height, pitch, format);
 
 	if (IS_ERR(fb)) {
-		dev_err(dev->dev, "failed to allocate fb\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
 		return PTR_ERR(fb);
 	}
 
@@ -104,15 +104,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
 	if (ret) {
-		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
 	}
 
 	fbi = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(fbi)) {
-		dev_err(dev->dev, "failed to allocate fb info\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
 		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
@@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
 
 	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
 	if (ret) {
-		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27..51a95da 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 			p = get_pages_vram(obj, npages);
 
 		if (IS_ERR(p)) {
-			dev_err(dev->dev, "could not get pages: %ld\n",
+			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 					PTR_ERR(p));
 			return p;
 		}
@@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		if (IS_ERR(msm_obj->sgt)) {
 			void *ptr = ERR_CAST(msm_obj->sgt);
 
-			dev_err(dev->dev, "failed to allocate sgt\n");
+			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 			msm_obj->sgt = NULL;
 			return ptr;
 		}
@@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 	ret = drm_gem_create_mmap_offset(obj);
 
 	if (ret) {
-		dev_err(dev->dev, "could not allocate mmap offset\n");
+		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 		return 0;
 	}
 
@@ -352,63 +352,104 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+		msm_gem_purge_vma(vma->aspace, vma);
+		msm_gem_close_vma(vma->aspace, vma);
 		del_vma(vma);
 	}
 }
 
-/* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
 
 	if (!vma) {
-		struct page **pages;
-
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto unlock;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
-		pages = get_pages(obj);
-		if (IS_ERR(pages)) {
-			ret = PTR_ERR(pages);
-			goto fail;
+		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+		if (ret) {
+			del_vma(vma);
+			return ret;
 		}
-
-		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT);
-		if (ret)
-			goto fail;
 	}
 
 	*iova = vma->iova;
-
-	mutex_unlock(&msm_obj->lock);
 	return 0;
+}
 
-fail:
-	del_vma(vma);
-unlock:
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+			obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
+
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova);
+	mutex_unlock(&msm_obj->lock);
+
+	return ret;
+}
+
 /* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
  */
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
@@ -424,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 	return vma ? vma->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	// XXX TODO ..
-	// NOTE: probably don't need a _locked() version.. we wouldn't
-	// normally unmap here, but instead just mark that it could be
-	// unmapped (if the iova refcnt drops to zero), but then later
-	// if another _get_iova_locked() fails we can start unmapping
-	// things that are no longer needed..
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+
+	if (!WARN_ON(!vma))
+		msm_gem_unmap_vma(aspace, vma);
+
+	mutex_unlock(&msm_obj->lock);
 }
 
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
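
This is the heart of the softpin groundwork: reserving a GPU virtual address (msm_gem_get_iova) is now distinct from pinning the backing pages into it (msm_gem_pin_iova), so userspace can learn a BO's address without forcing it resident. The resulting calling convention, as a sketch:

	uint64_t iova;

	/* reserve a GPU VA only; no pages are mapped yet */
	ret = msm_gem_get_iova(obj, aspace, &iova);

	/* or reserve and map in one step when the GPU needs it now */
	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);

	/* drop the pin; the actual unmap is deferred to purge/teardown */
	msm_gem_unpin_iova(obj, aspace);
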
@@ -441,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size  = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +523,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	mutex_lock(&msm_obj->lock);
 
 	if (WARN_ON(msm_obj->madv > madv)) {
-		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 			msm_obj->madv, madv);
 		mutex_unlock(&msm_obj->lock);
 		return ERR_PTR(-EBUSY);
@@ -739,16 +789,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	/* FIXME: we need to print the address space here too */
-	list_for_each_entry(vma, &msm_obj->vmas, list)
-		seq_printf(m, " %08llx", vma->iova);
+	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 
-	seq_printf(m, " %zu%s\n", obj->size, madv);
+	if (!list_empty(&msm_obj->vmas)) {
+
+		seq_puts(m, "      vmas:");
+
+		list_for_each_entry(vma, &msm_obj->vmas, list)
+			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+				vma->iova, vma->mapped ? "mapped" : "unmapped",
+				vma->inuse);
+
+		seq_puts(m, "\n");
+	}
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -775,9 +833,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 	int count = 0;
 	size_t size = 0;
 
+	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 	list_for_each_entry(msm_obj, list, mm_list) {
 		struct drm_gem_object *obj = &msm_obj->base;
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
 		count++;
 		size += obj->size;
@@ -831,7 +890,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle)
+		uint32_t size, uint32_t flags, uint32_t *handle,
+		char *name)
 {
 	struct drm_gem_object *obj;
 	int ret;
@@ -841,6 +901,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (name)
+		msm_gem_object_set_name(obj, "%s", name);
+
 	ret = drm_gem_handle_create(file, obj, handle);
 
 	/* drop reference from allocate - handle holds it now */
@@ -864,7 +927,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	case MSM_BO_WC:
 		break;
 	default:
-		dev_err(dev->dev, "invalid cache flag: %x\n",
+		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
 		return -EINVAL;
 	}
@@ -912,9 +975,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
 	size = PAGE_ALIGN(size);
 
-	if (!iommu_present(&platform_bus_type))
+	if (!msm_use_mmu(dev))
 		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
 		use_vram = true;
 
 	if (WARN_ON(use_vram && !priv->vram.size))
@@ -989,8 +1052,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	int ret, npages;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
-	if (!iommu_present(&platform_bus_type)) {
-		dev_err(dev->dev, "cannot import without IOMMU\n");
+	if (!msm_use_mmu(dev)) {
+		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1040,24 +1103,30 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 		return ERR_CAST(obj);
 
 	if (iova) {
-		ret = msm_gem_get_iova(obj, aspace, iova);
-		if (ret) {
-			drm_gem_object_put(obj);
-			return ERR_PTR(ret);
-		}
+		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+		if (ret)
+			goto err;
 	}
 
 	vaddr = msm_gem_get_vaddr(obj);
 	if (IS_ERR(vaddr)) {
-		msm_gem_put_iova(obj, aspace);
-		drm_gem_object_put(obj);
-		return ERR_CAST(vaddr);
+		msm_gem_unpin_iova(obj, aspace);
+		ret = PTR_ERR(vaddr);
+		goto err;
 	}
 
 	if (bo)
 		*bo = obj;
 
 	return vaddr;
+err:
+	if (locked)
+		drm_gem_object_put(obj);
+	else
+		drm_gem_object_put_unlocked(obj);
+
+	return ERR_PTR(ret);
+
 }
 
 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1142,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 {
 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+		struct msm_gem_address_space *aspace, bool locked)
+{
+	if (IS_ERR_OR_NULL(bo))
+		return;
+
+	msm_gem_put_vaddr(bo);
+	msm_gem_unpin_iova(bo, aspace);
+
+	if (locked)
+		drm_gem_object_put(bo);
+	else
+		drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(bo);
+	va_list ap;
+
+	if (!fmt)
+		return;
+
+	va_start(ap, fmt);
+	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+	va_end(ap);
+}
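
msm_gem_kernel_put() gives kernel-internal BOs one teardown path (vaddr put, iova unpin, object unref) instead of open-coded sequences, and msm_gem_object_set_name() attaches the label shown in the debugfs gem list. Usage sketch — the ring example is illustrative, mirroring how other objects in this series are named:

	/* at allocation time, label the BO for debugfs */
	msm_gem_object_set_name(ring->bo, "ring%d", ring->id);

	/* at teardown: unmap the vaddr, unpin the iova, drop the ref */
	msm_gem_kernel_put(ring->bo, gpu->aspace, false);
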
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3..2064fac 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -41,6 +41,8 @@ struct msm_gem_vma {
 	uint64_t iova;
 	struct msm_gem_address_space *aspace;
 	struct list_head list;    /* node in msm_gem_object::vmas */
+	bool mapped;
+	int inuse;
 };
 
 struct msm_gem_object {
@@ -91,6 +93,8 @@ struct msm_gem_object {
 	 */
 	struct drm_mm_node *vram_node;
 	struct mutex lock; /* Protects resources associated with bo */
+
+	char name[32]; /* Identifier to print for the debugfs files */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -150,6 +154,7 @@ struct msm_gem_submit {
 	struct msm_ringbuffer *ring;
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
+	u32 ident;	   /* An identifier for the submit, for logging */
 	struct {
 		uint32_t type;
 		uint32_t size;  /* in dwords */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6942604..a28465d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,6 +20,7 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 
 /*
  * Cmdstream submission:
@@ -48,7 +49,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	submit->dev = dev;
 	submit->gpu = gpu;
 	submit->fence = NULL;
-	submit->pid = get_pid(task_pid(current));
 	submit->cmd = (void *)&submit->bos[nr_bos];
 	submit->queue = queue;
 	submit->ring = gpu->rb[queue->prio];
@@ -114,8 +114,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 			pagefault_disable();
 		}
 
+/* at least one of the READ or WRITE flags must be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
 		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
-			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+			!(submit_bo.flags & MANDATORY_FLAGS)) {
 			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 			ret = -EINVAL;
 			goto out_unlock;
@@ -167,7 +170,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -241,7 +244,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 			 * strange place to call it.  OTOH this is a
 			 * convenient can-fail point to hook it in.
 			 */
-			ret = reservation_object_reserve_shared(msm_obj->resv);
+			ret = reservation_object_reserve_shared(msm_obj->resv,
+								1);
 			if (ret)
 				return ret;
 		}
@@ -269,7 +273,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 		uint64_t iova;
 
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova(&msm_obj->base,
+		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 
 		if (ret)
@@ -408,6 +412,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
+	static atomic_t ident = ATOMIC_INIT(0);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_msm_gem_submit *args = data;
 	struct msm_file_private *ctx = file->driver_priv;
@@ -417,9 +422,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_gpu_submitqueue *queue;
 	struct msm_ringbuffer *ring;
 	int out_fence_fd = -1;
+	struct pid *pid = get_pid(task_pid(current));
 	unsigned i;
-	int ret;
+	int ret, submitid;
 
 	if (!gpu)
 		return -ENXIO;
 
@@ -442,7 +447,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (!queue)
 		return -ENOENT;
 
+	/* Get a unique identifier for the submission for logging purposes */
+	submitid = atomic_inc_return(&ident) - 1;
+
 	ring = gpu->rb[queue->prio];
+	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+		args->nr_bos, args->nr_cmds);
 
 	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
 		struct dma_fence *in_fence;
@@ -483,6 +493,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
+	submit->pid = pid;
+	submit->ident = submitid;
+
 	if (args->flags & MSM_SUBMIT_SUDO)
 		submit->in_rb = true;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec22..5573607 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,20 +38,72 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
 {
-	if (!aspace || !vma->iova)
+	unsigned size = vma->node.size << PAGE_SHIFT;
+
+	/* Refuse to purge (and warn) if the vma is still in use */
+	if (WARN_ON(vma->inuse > 0))
 		return;
 
-	if (aspace->mmu) {
-		unsigned size = vma->node.size << PAGE_SHIFT;
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
-	}
+	/* Don't do anything if the memory isn't mapped */
+	if (!vma->mapped)
+		return;
+
+	if (aspace->mmu)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+	vma->mapped = false;
+}
+
+/* Drop a usage reference on the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (!WARN_ON(!vma->iova))
+		vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+	unsigned size = npages << PAGE_SHIFT;
+	int ret = 0;
+
+	if (WARN_ON(!vma->iova))
+		return -EINVAL;
+
+	/* Increase the usage counter */
+	vma->inuse++;
+
+	if (vma->mapped)
+		return 0;
+
+	vma->mapped = true;
+
+	if (aspace->mmu)
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+				size, IOMMU_READ | IOMMU_WRITE);
+
+	if (ret)
+		vma->mapped = false;
+
+	return ret;
+}
+
+/* Close an iova.  Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (WARN_ON(vma->inuse > 0 || vma->mapped))
+		return;
 
 	spin_lock(&aspace->lock);
-	drm_mm_remove_node(&vma->node);
+	if (vma->iova)
+		drm_mm_remove_node(&vma->node);
 	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
@@ -59,18 +111,16 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 	msm_gem_address_space_put(aspace);
 }
 
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages)
 {
 	int ret;
 
-	spin_lock(&aspace->lock);
-	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
-		spin_unlock(&aspace->lock);
-		return 0;
-	}
+	if (WARN_ON(vma->iova))
+		return -EBUSY;
 
+	spin_lock(&aspace->lock);
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	spin_unlock(&aspace->lock);
 
@@ -78,19 +128,14 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		return ret;
 
 	vma->iova = vma->node.start << PAGE_SHIFT;
+	vma->mapped = false;
 
-	if (aspace->mmu) {
-		unsigned size = npages << PAGE_SHIFT;
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
-	}
-
-	/* Get a reference to the aspace to keep it around */
 	kref_get(&aspace->kref);
 
-	return ret;
+	return 0;
 }
 
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
@@ -114,3 +159,26 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 
 	return aspace;
 }
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+		const char *name, uint64_t va_start, uint64_t va_end)
+{
+	struct msm_gem_address_space *aspace;
+	u64 size = va_end - va_start;
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&aspace->lock);
+	aspace->name = name;
+	aspace->mmu = msm_gpummu_new(dev, gpu);
+
+	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+		size >> PAGE_SHIFT);
+
+	kref_init(&aspace->kref);
+
+	return aspace;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2b7c894..5f3eff3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -19,6 +19,8 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
 
 #include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
@@ -107,7 +109,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
 			&msm_devfreq_profile, "simple_ondemand", NULL);
 
 	if (IS_ERR(gpu->devfreq.devfreq)) {
-		dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
 		gpu->devfreq.devfreq = NULL;
 	}
 
@@ -122,7 +124,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
 	if (gpu->gpu_reg) {
 		ret = regulator_enable(gpu->gpu_reg);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
 			return ret;
 		}
 	}
@@ -130,7 +132,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
 	if (gpu->gpu_cx) {
 		ret = regulator_enable(gpu->gpu_cx);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
 			return ret;
 		}
 	}
@@ -315,28 +317,28 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 
 	/* Don't record write only objects */
-
 	state_bo->size = obj->base.size;
 	state_bo->iova = iova;
 
-	/* Only store the data for buffer objects marked for read */
-	if ((flags & MSM_SUBMIT_BO_READ)) {
+	/* Only store the data for non-imported buffer objects marked for read */
+	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
 		void *ptr;
 
 		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
 		if (!state_bo->data)
-			return;
+			goto out;
 
 		ptr = msm_gem_get_vaddr_active(&obj->base);
 		if (IS_ERR(ptr)) {
 			kvfree(state_bo->data);
-			return;
+			state_bo->data = NULL;
+			goto out;
 		}
 
 		memcpy(state_bo->data, ptr, obj->base.size);
 		msm_gem_put_vaddr(&obj->base);
 	}
-
+out:
 	state->nr_bos++;
 }
 
@@ -364,12 +366,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 	if (submit) {
 		int i;
 
-		state->bos = kcalloc(submit->nr_bos,
+		state->bos = kcalloc(submit->nr_cmds,
 			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 
-		for (i = 0; state->bos && i < submit->nr_bos; i++)
-			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
-				submit->bos[i].iova, submit->bos[i].flags);
+		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+			int idx = submit->cmd[i].idx;
+
+			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+				submit->bos[idx].iova, submit->bos[idx].flags);
+		}
 	}
 
 	/* Set the active crash state to be dumped on failure */
@@ -432,7 +437,7 @@ static void recover_worker(struct work_struct *work)
 
 	mutex_lock(&dev->struct_mutex);
 
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 
 	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 	if (submit) {
@@ -459,7 +464,7 @@ static void recover_worker(struct work_struct *work)
 		}
 
 		if (comm && cmd) {
-			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 				gpu->name, comm, cmd);
 
 			msm_rd_dump_submit(priv->hangrd, submit,
@@ -542,11 +547,11 @@ static void hangcheck_handler(struct timer_list *t)
 	} else if (fence < ring->seqno) {
 		/* no progress and not done.. hung! */
 		ring->hangcheck_fence = fence;
-		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 				gpu->name, ring->id);
-		dev_err(dev->dev, "%s:     completed fence: %u\n",
+		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
 				gpu->name, fence);
-		dev_err(dev->dev, "%s:     submitted fence: %u\n",
+		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 				gpu->name, ring->seqno);
 
 		queue_work(priv->wq, &gpu->recover_work);
@@ -662,15 +667,33 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
  * Cmdstream submission/retirement:
  */
 
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		struct msm_gem_submit *submit)
 {
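+	/* stats slots are reused in a ring, indexed by seqno modulo MSM_GPU_SUBMIT_STATS_COUNT */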
+	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+	volatile struct msm_gpu_submit_stats *stats;
+	u64 elapsed, clock = 0;
 	int i;
 
+	stats = &ring->memptrs->stats[index];
+	/* Convert 19.2 MHz always-on counter ticks to nanoseconds of elapsed time */
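+	/* ns = ticks * (1e9 / 19.2e6) = ticks * 10000 / 192 */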
+	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+	do_div(elapsed, 192);
+
+	/* Calculate the clock frequency from the number of CP cycles */
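+	/* cycles/ns * 1000 == MHz, matching the "mhz=" field in the tracepoint */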
+	if (elapsed) {
+		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+		do_div(clock, elapsed);
+	}
+
+	trace_msm_gpu_submit_retired(submit, elapsed, clock,
+		stats->alwayson_start, stats->alwayson_end);
+
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		/* move to inactive: */
 		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
 		drm_gem_object_put(&msm_obj->base);
 	}
 
@@ -693,7 +716,7 @@ static void retire_submits(struct msm_gpu *gpu)
 
 		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
 			if (dma_fence_is_signaled(submit->fence))
-				retire_submit(gpu, submit);
+				retire_submit(gpu, ring, submit);
 		}
 	}
 }
@@ -754,7 +777,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_get(&msm_obj->base);
-		msm_gem_get_iova(&msm_obj->base,
+		msm_gem_get_and_pin_iova(&msm_obj->base,
 				submit->gpu->aspace, &iova);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -803,7 +826,6 @@ static struct msm_gem_address_space *
 msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
 		uint64_t va_start, uint64_t va_end)
 {
-	struct iommu_domain *iommu;
 	struct msm_gem_address_space *aspace;
 	int ret;
 
@@ -812,20 +834,27 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
 	 * and have separate page tables per context.  For now, to keep things
 	 * simple and to get something working, just use a single address space:
 	 */
-	iommu = iommu_domain_alloc(&platform_bus_type);
-	if (!iommu)
-		return NULL;
+	if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+		struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+		if (!iommu)
+			return NULL;
 
-	iommu->geometry.aperture_start = va_start;
-	iommu->geometry.aperture_end = va_end;
+		iommu->geometry.aperture_start = va_start;
+		iommu->geometry.aperture_end = va_end;
 
-	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+		DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
 
-	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+		aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+		if (IS_ERR(aspace))
+			iommu_domain_free(iommu);
+	} else {
+		aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+			va_start, va_end);
+	}
+
 	if (IS_ERR(aspace)) {
-		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
 			PTR_ERR(aspace));
-		iommu_domain_free(iommu);
 		return ERR_CAST(aspace);
 	}
 
@@ -874,14 +903,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
-		dev_err(drm->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
 	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
 			IRQF_TRIGGER_HIGH, gpu->name, gpu);
 	if (ret) {
-		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
 		goto fail;
 	}
 
@@ -914,22 +943,25 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		config->va_start, config->va_end);
 
 	if (gpu->aspace == NULL)
-		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	else if (IS_ERR(gpu->aspace)) {
 		ret = PTR_ERR(gpu->aspace);
 		goto fail;
 	}
 
-	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+	memptrs = msm_gem_kernel_new(drm,
+		sizeof(struct msm_rbmemptrs) * nr_rings,
 		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
 		&memptrs_iova);
 
 	if (IS_ERR(memptrs)) {
 		ret = PTR_ERR(memptrs);
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
 		goto fail;
 	}
 
+	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
 	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
 			ARRAY_SIZE(gpu->rb));
@@ -942,7 +974,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 		if (IS_ERR(gpu->rb[i])) {
 			ret = PTR_ERR(gpu->rb[i]);
-			dev_err(drm->dev,
+			DRM_DEV_ERROR(drm->dev,
 				"could not create ringbuffer %d: %d\n", i, ret);
 			goto fail;
 		}
@@ -961,11 +993,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		gpu->rb[i] = NULL;
 	}
 
-	if (gpu->memptrs_bo) {
-		msm_gem_put_vaddr(gpu->memptrs_bo);
-		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-		drm_gem_object_put_unlocked(gpu->memptrs_bo);
-	}
+	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
 	platform_set_drvdata(pdev, NULL);
 	return ret;
@@ -984,11 +1012,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		gpu->rb[i] = NULL;
 	}
 
-	if (gpu->memptrs_bo) {
-		msm_gem_put_vaddr(gpu->memptrs_bo);
-		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-		drm_gem_object_put_unlocked(gpu->memptrs_bo);
-	}
+	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
 	if (!IS_ERR_OR_NULL(gpu->aspace)) {
 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index f82bac0..efb49bb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -187,6 +187,7 @@ struct msm_gpu_state_bo {
 	u64 iova;
 	size_t size;
 	void *data;
+	bool encoded;
 };
 
 struct msm_gpu_state {
@@ -201,6 +202,7 @@ struct msm_gpu_state {
 		u32 wptr;
 		void *data;
 		int data_size;
+		bool encoded;
 	} ring[MSM_GPU_MAX_RINGS];
 
 	int nr_registers;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 0000000..1155118
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+	    TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+	    TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(u32, id)
+		    __field(u32, ringid)
+		    __field(u32, nr_cmds)
+		    __field(u32, nr_bos)
+		    ),
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->id = id;
+		    __entry->ringid = ringid;
+		    __entry->nr_bos = nr_bos;
+		    __entry->nr_cmds = nr_cmds;
+		    ),
+	    TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+		    __entry->id, __entry->pid, __entry->ringid,
+		    __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+	    TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+	    TP_ARGS(submit, ticks),
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(u32, id)
+		    __field(u32, ringid)
+		    __field(u32, seqno)
+		    __field(u64, ticks)
+		    ),
+	    TP_fast_assign(
+		    __entry->pid = pid_nr(submit->pid);
+		    __entry->id = submit->ident;
+		    __entry->ringid = submit->ring->id;
+		    __entry->seqno = submit->seqno;
+		    __entry->ticks = ticks;
+		    ),
+	    TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+		    __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+		    __entry->ticks)
+);
+
+TRACE_EVENT(msm_gpu_submit_retired,
+	    TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+		    u64 start, u64 end),
+	    TP_ARGS(submit, elapsed, clock, start, end),
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(u32, id)
+		    __field(u32, ringid)
+		    __field(u32, seqno)
+		    __field(u64, elapsed)
+		    __field(u64, clock)
+		    __field(u64, start_ticks)
+		    __field(u64, end_ticks)
+		    ),
+	    TP_fast_assign(
+		    __entry->pid = pid_nr(submit->pid);
+		    __entry->id = submit->ident;
+		    __entry->ringid = submit->ring->id;
+		    __entry->seqno = submit->seqno;
+		    __entry->elapsed = elapsed;
+		    __entry->clock = clock;
+		    __entry->start_ticks = start;
+		    __entry->end_ticks = end;
+		    ),
+	    TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+		    __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+		    __entry->elapsed, __entry->clock,
+		    __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 0000000..72c074f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 0000000..27312b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+	struct msm_mmu base;
+	struct msm_gpu *gpu;
+	dma_addr_t pt_base;
+	uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
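+/* i.e. one 32-bit PTE per 4K page across the whole VA range */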
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+		int cnt)
+{
+	return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+		int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, unsigned len, int prot)
+{
+	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+	struct scatterlist *sg;
+	unsigned prot_bits = 0;
+	unsigned i, j;
+
+	if (prot & IOMMU_WRITE)
+		prot_bits |= 1;
+	if (prot & IOMMU_READ)
+		prot_bits |= 2;
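+	/* page-aligned addresses leave the low PTE bits free to carry the prot flags */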
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		dma_addr_t addr = sg->dma_address;
+		for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+			gpummu->table[idx] = addr | prot_bits;
+			addr += GPUMMU_PAGE_SIZE;
+		}
+	}
+
+	/* could be improved by deferring the flush across multiple map() calls */
+	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+	return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+	unsigned i;
+
+	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+		gpummu->table[idx] = 0;
+
+	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+	return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+		DMA_ATTR_FORCE_CONTIGUOUS);
+
+	kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+		.attach = msm_gpummu_attach,
+		.detach = msm_gpummu_detach,
+		.map = msm_gpummu_map,
+		.unmap = msm_gpummu_unmap,
+		.destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+	struct msm_gpummu *gpummu;
+
+	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+	if (!gpummu)
+		return ERR_PTR(-ENOMEM);
+
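+	/* 32 extra bytes past the table back the TRAN_ERROR address handed out by msm_gpummu_params() */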
+	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+	if (!gpummu->table) {
+		kfree(gpummu);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	gpummu->gpu = gpu;
+	msm_mmu_init(&gpummu->base, dev, &funcs);
+
+	return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+		dma_addr_t *tran_error)
+{
+	dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+	*pt_base = base;
+	*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a90aa4..4d62790 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -71,8 +71,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	return (ret == len) ? 0 : -EINVAL;
 }
 
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index fd88ceb..2b81b43 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -67,9 +67,6 @@ struct msm_kms_funcs {
 	void (*set_encoder_mode)(struct msm_kms *kms,
 				 struct drm_encoder *encoder,
 				 bool cmd_mode);
-	/* pm suspend/resume hooks */
-	int (*pm_suspend)(struct device *dev);
-	int (*pm_resume)(struct device *dev);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4..d21b266 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -25,8 +25,7 @@ struct msm_mmu_funcs {
 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 			unsigned len, int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-			unsigned len);
+	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
 	void (*destroy)(struct msm_mmu *mmu);
 };
 
@@ -54,4 +53,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
 	mmu->handler = handler;
 }
 
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+		dma_addr_t *tran_error);
+
 #endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0c2c8d2..90e9d0a 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -348,6 +348,12 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	msm_gem_put_vaddr(&obj->base);
 }
 
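+/* Dump a BO if full dumps are enabled or userspace flagged it MSM_SUBMIT_BO_DUMP */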
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...)
@@ -389,15 +395,16 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 
 	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 
-	for (i = 0; rd_full && i < submit->nr_bos; i++)
-		snapshot_buf(rd, submit, i, 0, 0);
+	for (i = 0; i < submit->nr_bos; i++)
+		if (should_dump(submit, i))
+			snapshot_buf(rd, submit, i, 0, 0);
 
 	for (i = 0; i < submit->nr_cmds; i++) {
 		uint64_t iova = submit->cmd[i].iova;
 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
 
 		/* snapshot cmdstream bo's (if we haven't already): */
-		if (!rd_full) {
+		if (!should_dump(submit, i)) {
 			snapshot_buf(rd, submit, submit->cmd[i].idx,
 					submit->cmd[i].iova, szd * 4);
 		}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6f5295b..20a96fe 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -36,15 +36,18 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 
 	ring->gpu = gpu;
 	ring->id = id;
-	/* Pass NULL for the iova pointer - we will map it later */
+
 	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
-		MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+		MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
 
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
 		ring->start = 0;
 		goto fail;
 	}
+
+	msm_gem_object_set_name(ring->bo, "ring%d", id);
+
 	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
 	ring->next  = ring->start;
 	ring->cur   = ring->start;
@@ -73,10 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 
 	msm_fence_context_free(ring->fctx);
 
-	if (ring->bo) {
-		msm_gem_put_iova(ring->bo, ring->gpu->aspace);
-		msm_gem_put_vaddr(ring->bo);
-		drm_gem_object_put_unlocked(ring->bo);
-	}
+	msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
 	kfree(ring);
 }
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index cffce09..6434ebb 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -23,9 +23,25 @@
 #define rbmemptr(ring, member)  \
 	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
 
+#define rbmemptr_stats(ring, index, member) \
+	(rbmemptr((ring), stats) + \
+	 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+	 offsetof(struct msm_gpu_submit_stats, member))
+
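+/* CP cycle and always-on counter samples taken at the start and end of each submit */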
+struct msm_gpu_submit_stats {
+	u64 cpcycles_start;
+	u64 cpcycles_end;
+	u64 alwayson_start;
+	u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
 struct msm_rbmemptrs {
 	volatile uint32_t rptr;
 	volatile uint32_t fence;
+
+	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
 };
 
 struct msm_ringbuffer {
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 2393e6d1..88ba003 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -417,7 +417,7 @@ static int mxsfb_probe(struct platform_device *pdev)
 err_unload:
 	mxsfb_unload(drm);
 err_free:
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return ret;
 }
@@ -428,7 +428,7 @@ static int mxsfb_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(drm);
 	mxsfb_unload(drm);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 70dce54..1727d39 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -67,7 +67,7 @@ nv04_display_create(struct drm_device *dev)
 	for (i = 0; i < dcb->entries; i++) {
 		struct dcb_output *dcbent = &dcb->entry[i];
 
-		connector = nouveau_connector_create(dev, dcbent->connector);
+		connector = nouveau_connector_create(dev, dcbent);
 		if (IS_ERR(connector))
 			continue;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 849b0f4..3d074aa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -7,6 +7,7 @@
 nouveau-y += dispnv50/core907d.o
 nouveau-y += dispnv50/core917d.o
 nouveau-y += dispnv50/corec37d.o
+nouveau-y += dispnv50/corec57d.o
 
 nouveau-y += dispnv50/dac507d.o
 nouveau-y += dispnv50/dac907d.o
@@ -23,12 +24,14 @@
 nouveau-y += dispnv50/head907d.o
 nouveau-y += dispnv50/head917d.o
 nouveau-y += dispnv50/headc37d.o
+nouveau-y += dispnv50/headc57d.o
 
 nouveau-y += dispnv50/wimm.o
 nouveau-y += dispnv50/wimmc37b.o
 
 nouveau-y += dispnv50/wndw.o
 nouveau-y += dispnv50/wndwc37e.o
+nouveau-y += dispnv50/wndwc57e.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 908feb1..a194990 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -54,9 +54,10 @@ struct nv50_head_atom {
 		u64 offset:40;
 		u8 buffer:1;
 		u8 mode:4;
-		u8 size:2;
+		u16 size:11;
 		u8 range:2;
 		u8 output_mode:2;
+		void (*load)(struct drm_color_lut *, int size, void __iomem *);
 	} olut;
 
 	struct {
@@ -169,9 +170,11 @@ struct nv50_wndw_atom {
 			u8  buffer:1;
 			u8  enable:2;
 			u8  mode:4;
-			u8  size:2;
+			u16 size:11;
 			u8  range:2;
 			u8  output_mode:2;
+			void (*load)(struct drm_color_lut *, int size,
+				     void __iomem *);
 		} i;
 	} xlut;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index a562fc9..049ce6d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -80,6 +80,7 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	asyw->xlut.i.mode = 7;
 	asyw->xlut.i.enable = 2;
+	asyw->xlut.i.load = head907d_olut_load;
 }
 
 const struct nv50_wndw_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f3c49ad..c25e0eb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
 		int version;
 		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
 	} cores[] = {
+		{ TU104_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
 		{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
 		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
 		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 8470df9..df8336b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -46,5 +46,9 @@ extern const struct nv50_outp_func sor907d;
 int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+void corec37d_update(struct nv50_core *, u32 *, bool);
 extern const struct nv50_outp_func sorc37d;
+
+int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index b5c17c9..7860774 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,7 +24,7 @@
 
 #include <nouveau_bo.h>
 
-static void
+void
 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
 	u32 *push;
@@ -71,7 +71,7 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
 	nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
 }
 
-void
+static void
 corec37d_init(struct nv50_core *core)
 {
 	const u32 windows = 8; /*XXX*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
new file mode 100644
index 0000000..b606d68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static void
+corec57d_init(struct nv50_core *core)
+{
+	const u32 windows = 8; /*XXX*/
+	u32 *push, i;
+	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+		evo_mthd(push, 0x0208, 1);
+		evo_data(push, core->chan.sync.handle);
+		for (i = 0; i < windows; i++) {
+			evo_mthd(push, 0x1000 + (i * 0x080), 3);
+			evo_data(push, i >> 1);
+			evo_data(push, 0x0000000f);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x1010 + (i * 0x080), 1);
+			evo_data(push, 0x00117fff);
+		}
+		evo_mthd(push, 0x0200, 1);
+		evo_data(push, 0x00000001);
+		evo_kick(push, &core->chan);
+	}
+}
+
+static const struct nv50_core_func
+corec57d = {
+	.init = corec57d_init,
+	.ntfy_init = corec37d_ntfy_init,
+	.ntfy_wait_done = corec37d_ntfy_wait_done,
+	.update = corec37d_update,
+	.head = &headc57d,
+	.sor = &sorc37d,
+};
+
+int
+corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&corec57d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index f592087..cb6e4d2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} curses[] = {
+		{ TU104_DISP_CURSOR, 0, cursc37a_new },
 		{ GV100_DISP_CURSOR, 0, cursc37a_new },
 		{ GK104_DISP_CURSOR, 0, curs907a_new },
 		{ GF110_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index db1bf7f..134701a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1262,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
 static void
 nv50_mstm_init(struct nv50_mstm *mstm)
 {
-	if (mstm && mstm->mgr.mst_state)
-		drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	int ret;
+
+	if (!mstm || !mstm->mgr.mst_state)
+		return;
+
+	ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	if (ret == -1) {
+		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+		drm_kms_helper_hotplug_event(mstm->mgr.dev);
+	}
 }
 
 static void
@@ -2301,7 +2309,7 @@ nv50_display_create(struct drm_device *dev)
 
 	/* create encoder/connector objects based on VBIOS DCB table */
 	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
-		connector = nouveau_connector_create(dev, dcbe->connector);
+		connector = nouveau_connector_create(dev, dcbe);
 		if (IS_ERR(connector))
 			continue;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index e48c5eb..2216c58 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -45,6 +45,8 @@ struct nv50_disp_interlock {
 
 void corec37d_ntfy_init(struct nouveau_bo *, u32);
 
+void head907d_olut_load(struct drm_color_lut *, int size, void __iomem *);
+
 struct nv50_chan {
 	struct nvif_object user;
 	struct nvif_device *device;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 4f57e53..ac97ebce 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,9 +50,9 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	if (asyh->set.core   ) head->func->core_set(head, asyh);
 	if (asyh->set.olut   ) {
 		asyh->olut.offset = nv50_lut_load(&head->olut,
-						  asyh->olut.mode <= 1,
 						  asyh->olut.buffer,
-						  asyh->state.gamma_lut);
+						  asyh->state.gamma_lut,
+						  asyh->olut.load);
 		head->func->olut_set(head, asyh);
 	}
 	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
@@ -210,7 +210,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
 		}
 	}
 
-	if (!olut) {
+	if (!olut && !head->func->olut_identity) {
 		asyh->olut.handle = 0;
 		return 0;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 37b3248..d1c002f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head_func {
 	void (*view)(struct nv50_head *, struct nv50_head_atom *);
 	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
 	void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+	bool olut_identity;
 	void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*olut_clr)(struct nv50_head *);
 	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -75,4 +76,14 @@ int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
 			 struct nv50_head_atom *);
 
 extern const struct nv50_head_func headc37d;
+void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_clr(struct nv50_head *);
+int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+			 struct nv50_head_atom *);
+void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_curs_clr(struct nv50_head *);
+void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc57d;
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 51bc599..7561be5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -254,6 +254,23 @@ head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
+static void
+head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+	for (; size--; in++, mem += 8) {
+		writew(drm_color_lut_extract(in->  red, 11) << 3, mem + 0);
+		writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
+		writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(readw(mem - 8), mem + 0);
+	writew(readw(mem - 6), mem + 2);
+	writew(readw(mem - 4), mem + 4);
+}
+
 void
 head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
@@ -261,6 +278,8 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 		asyh->olut.mode = 0;
 	else
 		asyh->olut.mode = 1;
+
+	asyh->olut.load = head507d_olut_load;
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 6339071..c2d09dd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -214,9 +214,27 @@ head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 }
 
 void
+head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+	for (; size--; in++, mem += 8) {
+		writew(drm_color_lut_extract(in->  red, 14) + 0x6000, mem + 0);
+		writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
+		writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(readw(mem - 8), mem + 0);
+	writew(readw(mem - 6), mem + 2);
+	writew(readw(mem - 4), mem + 4);
+}
+
+void
 head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	asyh->olut.mode = 7;
+	asyh->olut.load = head907d_olut_load;
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 989c140..ef6a99d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -65,7 +65,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static void
+void
 headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -79,7 +79,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static void
+void
 headc37d_curs_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -93,7 +93,7 @@ headc37d_curs_clr(struct nv50_head *head)
 	}
 }
 
-static void
+void
 headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -112,7 +112,7 @@ headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static int
+int
 headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
 		     struct nv50_head_atom *asyh)
 {
@@ -155,6 +155,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 	asyh->olut.size = 0;
 	asyh->olut.range = 0;
 	asyh->olut.output_mode = 1;
+	asyh->olut.load = head907d_olut_load;
 }
 
 static void
@@ -181,7 +182,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static void
+void
 headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
new file mode 100644
index 0000000..32a7f9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+static void
+headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		/*XXX: This is a dirty hack until OR depth handling is
+		 *     improved later for deep colour etc.
+		 */
+		switch (asyh->or.depth) {
+		case 6: asyh->or.depth = 5; break;
+		case 5: asyh->or.depth = 4; break;
+		case 2: asyh->or.depth = 1; break;
+		case 0:	asyh->or.depth = 4; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+
+		evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
+		evo_data(push, 0xfc000001 |
+			       asyh->or.depth << 4 |
+			       asyh->or.nvsync << 3 |
+			       asyh->or.nhsync << 2);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
+#if 0
+		evo_data(push, 0x80000000 |
+			       asyh->procamp.sat.sin << 16 |
+			       asyh->procamp.sat.cos << 4);
+#else
+		evo_data(push, 0);
+#endif
+		evo_kick(push, core);
+	}
+}
+
+void
+headc57d_olut_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+void
+headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
+		evo_data(push, asyh->olut.size << 8 |
+			       asyh->olut.mode << 2 |
+			       asyh->olut.output_mode);
+		evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
+		evo_data(push, asyh->olut.handle);
+		evo_data(push, asyh->olut.offset >> 8);
+		evo_kick(push, core);
+	}
+}
+
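+/* Expand a 256-entry LUT to the 1024-entry hardware table, interpolating 4 steps per input entry */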
+static void
+headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+	memset_io(mem, 0x00, 0x20); /* VSS header. */
+	mem += 0x20;
+
+	while (size--) {
+		u16 r = drm_color_lut_extract(in->  red + 0, 16);
+		u16 g = drm_color_lut_extract(in->green + 0, 16);
+		u16 b = drm_color_lut_extract(in-> blue + 0, 16);
+		u16 ri = 0, gi = 0, bi = 0, i;
+
+		if (in++, size) {
+			ri = (drm_color_lut_extract(in->  red, 16) - r) / 4;
+			gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
+			bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
+		}
+
+		for (i = 0; i < 4; i++, mem += 8) {
+			writew(r + ri * i, mem + 0);
+			writew(g + gi * i, mem + 2);
+			writew(b + bi * i, mem + 4);
+		}
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(readw(mem - 8), mem + 0);
+	writew(readw(mem - 6), mem + 2);
+	writew(readw(mem - 4), mem + 4);
+}
+
+static void
+headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+	memset_io(mem, 0x00, 0x20); /* VSS header. */
+	mem += 0x20;
+
+	for (; size--; in++, mem += 0x08) {
+		writew(drm_color_lut_extract(in->  red, 16), mem + 0);
+		writew(drm_color_lut_extract(in->green, 16), mem + 2);
+		writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(readw(mem - 8), mem + 0);
+	writew(readw(mem - 6), mem + 2);
+	writew(readw(mem - 4), mem + 4);
+}
+
+void
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	asyh->olut.mode = 2; /* DIRECT10 */
+	asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
+	asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+	if (asyh->state.gamma_lut &&
+	    asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+		asyh->olut.load = headc57d_olut_load_8;
+	else
+		asyh->olut.load = headc57d_olut_load;
+}
+
+static void
+headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	struct nv50_head_mode *m = &asyh->mode;
+	u32 *push;
+	if ((push = evo_wait(core, 12))) {
+		evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
+		evo_data(push, (m->v.active  << 16) | m->h.active );
+		evo_data(push, (m->v.synce   << 16) | m->h.synce  );
+		evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
+		evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
+		evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+		evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+		evo_data(push, m->clock * 1000);
+		evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
+		evo_data(push, m->clock * 1000);
+		/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+		evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00001014);
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+headc57d = {
+	.view = headc37d_view,
+	.mode = headc57d_mode,
+	.olut = headc57d_olut,
+	.olut_identity = true,
+	.olut_set = headc57d_olut_set,
+	.olut_clr = headc57d_olut_clr,
+	.curs_layout = head917d_curs_layout,
+	.curs_format = headc37d_curs_format,
+	.curs_set = headc37d_curs_set,
+	.curs_clr = headc37d_curs_clr,
+	.dither = headc37d_dither,
+	.procamp = headc57d_procamp,
+	.or = headc57d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index a6b96ae2..994def4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -29,45 +29,29 @@
 #include <nvif/class.h>
 
 u32
-nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
-	      struct drm_property_blob *blob)
+nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
+	      void (*load)(struct drm_color_lut *, int, void __iomem *))
 {
-	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+	struct drm_color_lut *in = blob ? blob->data : NULL;
 	void __iomem *mem = lut->mem[buffer].object.map.ptr;
-	const int size = blob->length / sizeof(*in);
-	int bits, shift, i;
-	u16 zero, r, g, b;
-	u32 addr = lut->mem[buffer].addr;
+	const u32 addr = lut->mem[buffer].addr;
+	int i;
 
-	/* This can't happen.. But it shuts the compiler up. */
-	if (WARN_ON(size != 256))
-		return 0;
-
-	if (legacy) {
-		bits = 11;
-		shift = 3;
-		zero = 0x0000;
+	if (!in) {
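+		/* no blob bound: synthesize a linear identity ramp across the 1024 entries */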
+		in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
+		if (!WARN_ON(!in)) {
+			for (i = 0; i < 1024; i++) {
+				in[i].red   =
+				in[i].green =
+				in[i].blue  = (i << 16) >> 10;
+			}
+			load(in, 1024, mem);
+			kvfree(in);
+		}
 	} else {
-		bits = 14;
-		shift = 0;
-		zero = 0x6000;
+		load(in, blob->length / sizeof(*in), mem);
 	}
 
-	for (i = 0; i < size; i++) {
-		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
-		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
-		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
-		writew(r, mem + (i * 0x08) + 0);
-		writew(g, mem + (i * 0x08) + 2);
-		writew(b, mem + (i * 0x08) + 4);
-	}
-
-	/* INTERPOLATE modes require a "next" entry to interpolate with,
-	 * so we replicate the last entry to deal with this for now.
-	 */
-	writew(r, mem + (i * 0x08) + 0);
-	writew(g, mem + (i * 0x08) + 2);
-	writew(b, mem + (i * 0x08) + 4);
 	return addr;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
index 6d7b835..b3b9040 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -2,6 +2,7 @@
 #define __NV50_KMS_LUT_H__
 #include <nvif/mem.h>
 struct drm_property_blob;
+struct drm_color_lut;
 struct nv50_disp;
 
 struct nv50_lut {
@@ -10,6 +11,6 @@ struct nv50_lut {
 
 int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
 void nv50_lut_fini(struct nv50_lut *);
-u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
-		  struct drm_property_blob *);
+u32 nv50_lut_load(struct nv50_lut *, int buffer, struct drm_property_blob *,
+		  void (*)(struct drm_color_lut *, int size, void __iomem *));
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index fc36e06..bc9eeaf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
 		int version;
 		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
 	} wimms[] = {
+		{ TU104_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
 		{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
 		{}
 	};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 2187922..ba9eea2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -139,10 +139,8 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 	if (asyw->set.xlut ) {
 		if (asyw->ilut) {
 			asyw->xlut.i.offset =
-				nv50_lut_load(&wndw->ilut,
-					      asyw->xlut.i.mode <= 1,
-					      asyw->xlut.i.buffer,
-					      asyw->ilut);
+				nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
+					      asyw->ilut, asyw->xlut.i.load);
 		}
 		wndw->func->xlut_set(wndw, asyw);
 	}
@@ -322,6 +320,11 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
 		asyh->wndw.olut &= ~BIT(wndw->id);
 	}
 
+	if (!ilut && wndw->func->ilut_identity) {
+		static struct drm_property_blob dummy = {};
+		ilut = &dummy;
+	}
+
 	/* Recalculate LUT state. */
 	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
 	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
@@ -623,6 +626,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
 		int (*new)(struct nouveau_drm *, enum drm_plane_type,
 			   int, s32, struct nv50_wndw **);
 	} wndws[] = {
+		{ TU104_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
 		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
 		{}
 	};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index b0b6428..03f3d8d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -65,6 +65,7 @@ struct nv50_wndw_func {
 	int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
 			       struct nvif_device *);
 	void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	bool ilut_identity;
 	bool olut_core;
 	void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*xlut_clr)(struct nv50_wndw *);
@@ -90,6 +91,23 @@ extern const struct nv50_wimm_func curs507a;
 
 int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
 		 struct nv50_wndw **);
+int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
+		  enum drm_plane_type type, int index, s32 oclass, u32 heads,
+		  struct nv50_wndw **);
+int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+		     struct nv50_head_atom *);
+void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+		      struct nv50_head_atom *);
+void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_sema_clr(struct nv50_wndw *);
+void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_ntfy_clr(struct nv50_wndw *);
+void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_update(struct nv50_wndw *, u32 *);
+
+int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+		 struct nv50_wndw **);
 
 int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
 		  struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 44afb0f..e52a85c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -61,9 +61,10 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	asyw->xlut.i.size = 0;
 	asyw->xlut.i.range = 0;
 	asyw->xlut.i.output_mode = 1;
+	asyw->xlut.i.load = head907d_olut_load;
 }
 
-static void
+void
 wndwc37e_image_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -76,7 +77,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
 	}
 }
 
-static void
+void
 wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -117,7 +118,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	evo_kick(push, &wndw->wndw);
 }
 
-static void
+void
 wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -128,7 +129,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
 	}
 }
 
-static void
+void
 wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -140,7 +141,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
+void
 wndwc37e_sema_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -151,7 +152,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
 	}
 }
 
-static void
+void
 wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -165,7 +166,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
+void
 wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
 {
 	u32 *push;
@@ -183,13 +184,13 @@ wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
 	}
 }
 
-static void
+void
 wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 		 struct nv50_head_atom *asyh)
 {
 }
 
-static int
+int
 wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 		 struct nv50_head_atom *asyh)
 {
@@ -236,7 +237,7 @@ wndwc37e = {
 	.update = wndwc37e_update,
 };
 
-static int
+int
 wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
 	      enum drm_plane_type type, int index, s32 oclass, u32 heads,
 	      struct nv50_wndw **pwndw)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
new file mode 100644
index 0000000..ba89f1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/clc37e.h>
+
+static void
+wndwc57e_ilut_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0444, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x0440, 3);
+		evo_data(push, asyw->xlut.i.size << 8 |
+			       asyw->xlut.i.mode << 2 |
+			       asyw->xlut.i.output_mode);
+		evo_data(push, asyw->xlut.handle);
+		evo_data(push, asyw->xlut.i.offset >> 8);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static u16
+fixedU0_16_FP16(u16 fixed)
+{
+	int sign = 0, exp = 0, man = 0;
+	if (fixed) {
+		while (--exp && !(fixed & 0x8000))
+			fixed <<= 1;
+		man = ((fixed << 1) & 0xffc0) >> 6;
+		exp += 15;
+	}
+	return (sign << 15) | (exp << 10) | man;
+}
+
+static void
+wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+	memset_io(mem, 0x00, 0x20); /* VSS header. */
+	mem += 0x20;
+
+	for (; size--; in++, mem += 0x08) {
+		u16 r = fixedU0_16_FP16(drm_color_lut_extract(in->  red, 16));
+		u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
+		u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
+		writew(r, mem + 0);
+		writew(g, mem + 2);
+		writew(b, mem + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(readw(mem - 8), mem + 0);
+	writew(readw(mem - 6), mem + 2);
+	writew(readw(mem - 4), mem + 4);
+}
+
+static void
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+	if (size == 256) {
+		asyw->xlut.i.mode = 1; /* DIRECT8. */
+	} else {
+		asyw->xlut.i.mode = 2; /* DIRECT10. */
+		size = 1024;
+	}
+	asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Replicated entry. */;
+	asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+	asyw->xlut.i.load = wndwc57e_ilut_load;
+}
+
+static const struct nv50_wndw_func
+wndwc57e = {
+	.acquire = wndwc37e_acquire,
+	.release = wndwc37e_release,
+	.sema_set = wndwc37e_sema_set,
+	.sema_clr = wndwc37e_sema_clr,
+	.ntfy_set = wndwc37e_ntfy_set,
+	.ntfy_clr = wndwc37e_ntfy_clr,
+	.ntfy_reset = corec37d_ntfy_init,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.ilut = wndwc57e_ilut,
+	.ilut_identity = true,
+	.xlut_set = wndwc57e_ilut_set,
+	.xlut_clr = wndwc57e_ilut_clr,
+	.image_set = wndwc37e_image_set,
+	.image_clr = wndwc37e_image_clr,
+	.update = wndwc37e_update,
+};
+
+int
+wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+	     s32 oclass, struct nv50_wndw **pwndw)
+{
+	return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
+			     BIT(index >> 1), pwndw);
+}
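
fixedU0_16_FP16() above normalises an unsigned U0.16 fixed-point value, keeps
the 10 bits after the leading one as the mantissa, and rebases the exponent
onto FP16's bias of 15 (the sign term is always zero for an unsigned source;
it only documents the bit layout). Note it truncates rather than rounds, and
values far below FP16's normal range get no special handling. A compilable
copy with a tiny driver, for experimenting outside the kernel:

#include <stdio.h>
#include <stdint.h>

static uint16_t
fixedU0_16_FP16(uint16_t fixed)
{
	int sign = 0, exp = 0, man = 0;
	if (fixed) {
		/* Normalise: shift until the leading 1 reaches bit 15. */
		while (--exp && !(fixed & 0x8000))
			fixed <<= 1;
		/* 10 mantissa bits follow the implicit leading 1. */
		man = ((fixed << 1) & 0xffc0) >> 6;
		exp += 15;	/* rebase onto the FP16 exponent bias */
	}
	return (sign << 15) | (exp << 10) | man;
}

int main(void)
{
	uint16_t samples[] = { 0x0000, 0x4000, 0x8000, 0xc000, 0xffff };
	/* expected: 0x0000, 0x3400, 0x3800, 0x3a00, 0x3bff */
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("U0.16 0x%04x (%f) -> FP16 0x%04x\n", samples[i],
		       samples[i] / 65536.0, fixedU0_16_FP16(samples[i]));
	return 0;
}
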
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 4f52331..4cbed03 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -32,6 +32,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_MAXWELL                                          0x09
 #define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
+#define NV_DEVICE_INFO_V0_TURING                                           0x0c
 	__u8  family;
 	__u8  pad06[2];
 	__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index fbfcffc..81401eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -4,12 +4,13 @@
 
 struct kepler_channel_gpfifo_a_v0 {
 	__u8  version;
-	__u8  pad01[1];
+	__u8  priv;
 	__u16 chid;
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 runlist;
 	__u64 vmm;
+	__u64 inst;
 };
 
 #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 6db56bd..1d82cbf 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -68,7 +68,8 @@
 #define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
-#define VOLTA_CHANNEL_GPFIFO_A                        /* cla06f.h */ 0x0000c36f
+#define VOLTA_CHANNEL_GPFIFO_A                        /* clc36f.h */ 0x0000c36f
+#define TURING_CHANNEL_GPFIFO_A                       /* clc36f.h */ 0x0000c46f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
@@ -83,6 +84,7 @@
 #define GP100_DISP                                    /* cl5070.h */ 0x00009770
 #define GP102_DISP                                    /* cl5070.h */ 0x00009870
 #define GV100_DISP                                    /* cl5070.h */ 0x0000c370
+#define TU104_DISP                                    /* cl5070.h */ 0x0000c570
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -95,6 +97,7 @@
 #define GF110_DISP_CURSOR                             /* cl507a.h */ 0x0000907a
 #define GK104_DISP_CURSOR                             /* cl507a.h */ 0x0000917a
 #define GV100_DISP_CURSOR                             /* cl507a.h */ 0x0000c37a
+#define TU104_DISP_CURSOR                             /* cl507a.h */ 0x0000c57a
 
 #define NV50_DISP_OVERLAY                             /* cl507b.h */ 0x0000507b
 #define G82_DISP_OVERLAY                              /* cl507b.h */ 0x0000827b
@@ -103,6 +106,7 @@
 #define GK104_DISP_OVERLAY                            /* cl507b.h */ 0x0000917b
 
 #define GV100_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c37b
+#define TU104_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c57b
 
 #define NV50_DISP_BASE_CHANNEL_DMA                    /* cl507c.h */ 0x0000507c
 #define G82_DISP_BASE_CHANNEL_DMA                     /* cl507c.h */ 0x0000827c
@@ -125,6 +129,7 @@
 #define GP100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000977d
 #define GP102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c37d
+#define TU104_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c57d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
@@ -134,6 +139,7 @@
 #define GK104_DISP_OVERLAY_CONTROL_DMA                /* cl507e.h */ 0x0000917e
 
 #define GV100_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c37e
+#define TU104_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c57e
 
 #define NV50_TESLA                                                   0x00005097
 #define G82_TESLA                                                    0x00008297
@@ -183,6 +189,7 @@
 #define PASCAL_DMA_COPY_A                                            0x0000c0b5
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
 #define VOLTA_DMA_COPY_A                                             0x0000c3b5
+#define TURING_DMA_COPY_A                                            0x0000c5b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
new file mode 100644
index 0000000..6b14d7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC36F_H__
+#define __NVIF_CLC36F_H__
+
+struct volta_channel_gpfifo_a_v0 {
+	__u8  version;
+	__u8  priv;
+	__u16 chid;
+	__u32 ilength;
+	__u64 ioffset;
+	__u64 runlist;
+	__u64 vmm;
+	__u64 inst;
+	__u32 token;
+};
+
+#define NVC36F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
+#define NVC36F_V0_NTFY_KILLED                                              0x01
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d83d834..72e4dc1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -61,7 +61,11 @@ enum nvkm_devidx {
 	NVKM_ENGINE_NVENC2,
 	NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
 
-	NVKM_ENGINE_NVDEC,
+	NVKM_ENGINE_NVDEC0,
+	NVKM_ENGINE_NVDEC1,
+	NVKM_ENGINE_NVDEC2,
+	NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
+
 	NVKM_ENGINE_PM,
 	NVKM_ENGINE_SEC,
 	NVKM_ENGINE_SEC2,
@@ -114,6 +118,7 @@ struct nvkm_device {
 		GM100    = 0x110,
 		GP100    = 0x130,
 		GV100    = 0x140,
+		TU100    = 0x160,
 	} card_type;
 	u32 chipset;
 	u8  chiprev;
@@ -163,7 +168,7 @@ struct nvkm_device {
 	struct nvkm_engine *msppp;
 	struct nvkm_engine *msvld;
 	struct nvkm_engine *nvenc[3];
-	struct nvkm_nvdec *nvdec;
+	struct nvkm_nvdec *nvdec[3];
 	struct nvkm_pm *pm;
 	struct nvkm_engine *sec;
 	struct nvkm_sec2 *sec2;
@@ -235,7 +240,7 @@ struct nvkm_device_chip {
 	int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*nvdec   )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
+	int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
 	int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
 	int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*sec2    )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 05f505d..f34c803 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -29,6 +29,7 @@ struct nvkm_memory_func {
 	void *(*dtor)(struct nvkm_memory *);
 	enum nvkm_memory_target (*target)(struct nvkm_memory *);
 	u8 (*page)(struct nvkm_memory *);
+	u64 (*bar2)(struct nvkm_memory *);
 	u64 (*addr)(struct nvkm_memory *);
 	u64 (*size)(struct nvkm_memory *);
 	void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
@@ -56,6 +57,7 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
 
 #define nvkm_memory_target(p) (p)->func->target(p)
 #define nvkm_memory_page(p) (p)->func->page(p)
+#define nvkm_memory_bar2(p) (p)->func->bar2(p)
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
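
nvkm_memory is dispatched through a function-pointer table wrapped in macros,
and the new bar2 accessor simply grows that vtable. A stripped-down
illustration of the pattern — all names invented, not nvkm's:

#include <stdint.h>

struct memory;

struct memory_func {
	uint64_t (*addr)(struct memory *);
	uint64_t (*size)(struct memory *);
	uint64_t (*bar2)(struct memory *);	/* optional; may be NULL */
};

struct memory {
	const struct memory_func *func;
};

#define memory_addr(p) (p)->func->addr(p)
#define memory_size(p) (p)->func->size(p)
#define memory_bar2(p) (p)->func->bar2(p)	/* caller must know it is set */
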
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index fc295e1..86abe76 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -11,4 +11,5 @@ int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int tu104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index ef7dc08..5ca86e17 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -36,4 +36,5 @@ int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int tu104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 7e39fbe..3b2b685 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -74,4 +74,5 @@ int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int tu104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index f6bd94c..fd9d713 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -16,8 +16,10 @@ struct nvkm_bar {
 };
 
 struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar1_reset(struct nvkm_device *);
 void nvkm_bar_bar2_init(struct nvkm_device *);
 void nvkm_bar_bar2_fini(struct nvkm_device *);
+void nvkm_bar_bar2_reset(struct nvkm_device *);
 struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
 
@@ -27,4 +29,5 @@ int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int tu104_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index 703a5b5..425ccc4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -12,11 +12,14 @@ u32 nvbios_M0203Tp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		   struct nvbios_M0203T *);
 
 struct nvbios_M0203E {
-#define M0203E_TYPE_DDR2  0x0
-#define M0203E_TYPE_DDR3  0x1
-#define M0203E_TYPE_GDDR3 0x2
-#define M0203E_TYPE_GDDR5 0x3
-#define M0203E_TYPE_SKIP  0xf
+#define M0203E_TYPE_DDR2   0x0
+#define M0203E_TYPE_DDR3   0x1
+#define M0203E_TYPE_GDDR3  0x2
+#define M0203E_TYPE_GDDR5  0x3
+#define M0203E_TYPE_HBM2   0x6
+#define M0203E_TYPE_GDDR5X 0x8
+#define M0203E_TYPE_GDDR6  0x9
+#define M0203E_TYPE_SKIP   0xf
 	u8 type;
 	u8 strap;
 	u8 group;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index ed9e0a6..8463b42 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -20,6 +20,7 @@ enum dcb_connector_type {
 	DCB_CONNECTOR_DMS59_DP0 = 0x64,
 	DCB_CONNECTOR_DMS59_DP1 = 0x65,
 	DCB_CONNECTOR_WFD	= 0x70,
+	DCB_CONNECTOR_USB_C	= 0x71,
 	DCB_CONNECTOR_NONE = 0xff
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 486e763..1b71812 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -31,4 +31,5 @@ int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int tu104_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 5a77498..127f480 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,4 +30,5 @@ struct nvkm_fault_data {
 
 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int tu104_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 96ccc62..27298f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -105,7 +105,10 @@ enum nvkm_ram_type {
 	NVKM_RAM_TYPE_GDDR2,
 	NVKM_RAM_TYPE_GDDR3,
 	NVKM_RAM_TYPE_GDDR4,
-	NVKM_RAM_TYPE_GDDR5
+	NVKM_RAM_TYPE_GDDR5,
+	NVKM_RAM_TYPE_GDDR5X,
+	NVKM_RAM_TYPE_GDDR6,
+	NVKM_RAM_TYPE_HBM2,
 };
 
 struct nvkm_ram {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 61c93c8..b66dedd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -31,4 +31,5 @@ int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int tu104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 6885955..0a0e064 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -130,4 +130,5 @@ int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index e9b0746..3693ebf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -28,6 +28,18 @@ struct nvkm_timer {
 u64 nvkm_timer_read(struct nvkm_timer *);
 void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
 
+struct nvkm_timer_wait {
+	struct nvkm_timer *tmr;
+	u64 limit;
+	u64 time0;
+	u64 time1;
+	int reads;
+};
+
+void nvkm_timer_wait_init(struct nvkm_device *, u64 nsec,
+			  struct nvkm_timer_wait *);
+s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
+
 /* Delay based on GPU time (ie. PTIMER).
  *
  * Will return -ETIMEDOUT unless the loop was terminated with 'break',
@@ -38,21 +50,17 @@ void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
  */
 #define NVKM_DELAY _warn = false;
 #define nvkm_nsec(d,n,cond...) ({                                              \
-	struct nvkm_device *_device = (d);                                     \
-	struct nvkm_timer *_tmr = _device->timer;                              \
-	u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr);                      \
-	s64 _taken = 0;                                                        \
+	struct nvkm_timer_wait _wait;                                          \
 	bool _warn = true;                                                     \
+	s64 _taken = 0;                                                        \
                                                                                \
+	nvkm_timer_wait_init((d), (n), &_wait);                                \
 	do {                                                                   \
 		cond                                                           \
-	} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs);    \
+	} while ((_taken = nvkm_timer_wait_test(&_wait)) >= 0);                \
                                                                                \
-	if (_taken >= _nsecs) {                                                \
-		if (_warn)                                                     \
-			dev_WARN(_device->dev, "timeout\n");                   \
-		_taken = -ETIMEDOUT;                                           \
-	}                                                                      \
+	if (_warn && _taken < 0)                                               \
+		dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n");          \
 	_taken;                                                                \
 })
 #define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
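
The nvkm_nsec() rework above moves the deadline bookkeeping out of the macro
into a small state object (nvkm_timer_wait) initialised once and re-tested on
each poll iteration. A standalone userspace approximation of that split — the
names and the clock source here are ours, not nvkm's:

#include <errno.h>
#include <stdint.h>
#include <time.h>

struct timer_wait {
	uint64_t limit;	/* absolute deadline, in ns */
};

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void
timer_wait_init(uint64_t nsec, struct timer_wait *wait)
{
	wait->limit = now_ns() + nsec;
}

/* Returns >= 0 while the budget lasts, negative once it has expired,
 * matching the contract the reworked nvkm_nsec() loop relies on. */
static int64_t
timer_wait_test(const struct timer_wait *wait)
{
	int64_t left = (int64_t)(wait->limit - now_ns());

	return left >= 0 ? left : -ETIMEDOUT;
}

/* Usage, mirroring the macro's shape:
 *
 *	struct timer_wait wait;
 *	timer_wait_init(2000000, &wait);	// 2ms budget
 *	do {
 *		if (condition_met())
 *			break;
 *	} while (timer_wait_test(&wait) >= 0);
 */
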
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e67a471..b06cdac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -306,7 +306,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 
 	/* create channel object and initialise dma and fence management */
 	ret = nouveau_channel_new(drm, device, init->fb_ctxdma_handle,
-				  init->tt_ctxdma_handle, &chan->chan);
+				  init->tt_ctxdma_handle, false, &chan->chan);
 	if (ret)
 		goto done;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7214022..73eff52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
+		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 92d3115..668afbc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -29,6 +29,7 @@
 #include <nvif/cl506f.h>
 #include <nvif/cl906f.h>
 #include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
 #include <nvif/ioctl.h>
 
 /*XXX*/
@@ -217,10 +218,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
 static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
-		    u64 runlist, struct nouveau_channel **pchan)
+		    u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
-	static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
+	static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+					VOLTA_CHANNEL_GPFIFO_A,
 					PASCAL_CHANNEL_GPFIFO_A,
 					MAXWELL_CHANNEL_GPFIFO_A,
 					KEPLER_CHANNEL_GPFIFO_B,
@@ -234,6 +236,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		struct nv50_channel_gpfifo_v0 nv50;
 		struct fermi_channel_gpfifo_v0 fermi;
 		struct kepler_channel_gpfifo_a_v0 kepler;
+		struct volta_channel_gpfifo_a_v0 volta;
 	} args;
 	struct nouveau_channel *chan;
 	u32 size;
@@ -247,12 +250,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 
 	/* create channel object */
 	do {
+		if (oclass[0] >= VOLTA_CHANNEL_GPFIFO_A) {
+			args.volta.version = 0;
+			args.volta.ilength = 0x02000;
+			args.volta.ioffset = 0x10000 + chan->push.addr;
+			args.volta.runlist = runlist;
+			args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+			args.volta.priv = priv;
+			size = sizeof(args.volta);
+		} else
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.addr;
 			args.kepler.runlist = runlist;
 			args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+			args.kepler.priv = priv;
 			size = sizeof(args.kepler);
 		} else
 		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
@@ -273,13 +286,20 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		ret = nvif_object_init(&device->object, 0, *oclass++,
 				       &args, size, &chan->user);
 		if (ret == 0) {
-			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+			if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
+				chan->chid = args.volta.chid;
+				chan->inst = args.volta.inst;
+				chan->token = args.volta.token;
+			} else
+			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) {
 				chan->chid = args.kepler.chid;
-			else
-			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+				chan->inst = args.kepler.inst;
+			} else
+			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
 				chan->chid = args.fermi.chid;
-			else
+			} else {
 				chan->chid = args.nv50.chid;
+			}
 			return ret;
 		}
 	} while (*oclass);
@@ -448,7 +468,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
 int
 nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
-		    u32 arg0, u32 arg1, struct nouveau_channel **pchan)
+		    u32 arg0, u32 arg1, bool priv,
+		    struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
 	bool super;
@@ -458,7 +479,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
 	super = cli->base.super;
 	cli->base.super = true;
 
-	ret = nouveau_channel_ind(drm, device, arg0, pchan);
+	ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
 	if (ret) {
 		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
 		ret = nouveau_channel_dma(drm, device, pchan);
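
nouveau_channel_ind() keeps its newest-first class negotiation: it walks the
oclasses[] list from TURING_CHANNEL_GPFIFO_A downwards, fills the argument
layout matching the class being tried, and stops at the first one the object
constructor accepts. A stripped-down sketch of that idiom — probe_class() is
a stub standing in for nvif_object_init():

#include <errno.h>
#include <stdint.h>

#define CHAN_TURING 0xc46f	/* TURING_CHANNEL_GPFIFO_A */
#define CHAN_VOLTA  0xc36f	/* VOLTA_CHANNEL_GPFIFO_A  */
#define CHAN_PASCAL 0xc06f	/* PASCAL_CHANNEL_GPFIFO_A */

/* Stub: pretend the GPU only accepts the Volta class. */
static int
probe_class(uint16_t oclass)
{
	return oclass == CHAN_VOLTA ? 0 : -ENODEV;
}

static int
channel_new(uint16_t *out_class)
{
	static const uint16_t oclasses[] =
		{ CHAN_TURING, CHAN_VOLTA, CHAN_PASCAL, 0 };
	const uint16_t *oclass = oclasses;
	int ret = -ENODEV;

	do {
		/* The real code also selects the matching args layout
		 * (args.volta vs args.kepler, ...) before each attempt. */
		ret = probe_class(*oclass);
		if (ret == 0) {
			*out_class = *oclass;
			break;
		}
	} while (*++oclass);

	return ret;
}
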
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 64454c2..28418f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -10,6 +10,8 @@ struct nouveau_channel {
 	struct nouveau_drm *drm;
 
 	int chid;
+	u64 inst;
+	u32 token;
 
 	struct nvif_object vram;
 	struct nvif_object gart;
@@ -48,7 +50,8 @@ struct nouveau_channel {
 int nouveau_channels_init(struct nouveau_drm *);
 
 int  nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
-			 u32 arg0, u32 arg1, struct nouveau_channel **);
+			 u32 arg0, u32 arg1, bool priv,
+			 struct nouveau_channel **);
 void nouveau_channel_del(struct nouveau_channel **);
 int  nouveau_channel_idle(struct nouveau_channel *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fd80661..3f463c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -403,6 +403,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
 	if (nv_connector->aux.transfer) {
 		drm_dp_cec_unregister_connector(&nv_connector->aux);
 		drm_dp_aux_unregister(&nv_connector->aux);
+		kfree(nv_connector->aux.name);
 	}
 	kfree(connector);
 }
@@ -1218,7 +1219,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
 	case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
 	case DCB_CONNECTOR_DMS59_DP0:
 	case DCB_CONNECTOR_DMS59_DP1:
-	case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
+	case DCB_CONNECTOR_DP       :
+	case DCB_CONNECTOR_USB_C    : return DRM_MODE_CONNECTOR_DisplayPort;
 	case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
 	case DCB_CONNECTOR_HDMI_0   :
 	case DCB_CONNECTOR_HDMI_1   :
@@ -1232,7 +1234,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
 }
 
 struct drm_connector *
-nouveau_connector_create(struct drm_device *dev, int index)
+nouveau_connector_create(struct drm_device *dev,
+			 const struct dcb_output *dcbe)
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct nouveau_drm *drm = nouveau_drm(dev);
@@ -1240,6 +1243,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	struct nouveau_connector *nv_connector = NULL;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
+	char aux_name[48] = {0};
+	int index = dcbe->connector;
 	int type, ret = 0;
 	bool dummy;
 
@@ -1342,6 +1347,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	case DRM_MODE_CONNECTOR_eDP:
 		nv_connector->aux.dev = dev->dev;
 		nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+		snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
+			 dcbe->hasht, dcbe->hashm);
+		nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
 		ret = drm_dp_aux_register(&nv_connector->aux);
 		if (ret) {
 			NV_ERROR(drm, "failed to register aux channel\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index f57ef35..f43a8d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -38,6 +38,7 @@
 #include "nouveau_encoder.h"
 
 struct nvkm_i2c_port;
+struct dcb_output;
 
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
 struct nouveau_backlight;
@@ -113,7 +114,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 }
 
 struct drm_connector *
-nouveau_connector_create(struct drm_device *, int index);
+nouveau_connector_create(struct drm_device *, const struct dcb_output *);
 
 extern int nouveau_tv_disable;
 extern int nouveau_ignorelid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 9109b69..88a52f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,6 +47,26 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
 }
 
 static int
+nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(drm->dev->dev);
+	if (ret < 0 && ret != -EACCES)
+		return ret;
+
+	seq_printf(m, "0x%08x\n",
+		   nvif_rd32(&drm->client.device.object, 0x101000));
+
+	pm_runtime_mark_last_busy(drm->dev->dev);
+	pm_runtime_put_autosuspend(drm->dev->dev);
+
+	return 0;
+}
+
+static int
 nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
 {
 	struct drm_device *drm = m->private;
@@ -185,7 +205,8 @@ static const struct file_operations nouveau_pstate_fops = {
 };
 
 static struct drm_info_list nouveau_debugfs_list[] = {
-	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+	{ "vbios.rom",  nouveau_debugfs_vbios_image, 0, NULL },
+	{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
@@ -199,8 +220,9 @@ static const struct nouveau_debugfs_files {
 int
 nouveau_drm_debugfs_init(struct drm_minor *minor)
 {
+	struct nouveau_drm *drm = nouveau_drm(minor->dev);
 	struct dentry *dentry;
-	int i;
+	int i, ret;
 
 	for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
 		dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
@@ -211,9 +233,23 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
 			return -ENOMEM;
 	}
 
-	return drm_debugfs_create_files(nouveau_debugfs_list,
-					NOUVEAU_DEBUGFS_ENTRIES,
-					minor->debugfs_root, minor);
+	ret = drm_debugfs_create_files(nouveau_debugfs_list,
+				       NOUVEAU_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	/* Set the size of the vbios since we know it; it's confusing to
+	 * userspace if it wants to seek() but the file has a length of 0.
+	 */
+	dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
+	if (!dentry)
+		return 0;
+
+	d_inode(dentry)->i_size = drm->vbios.length;
+	dput(dentry);
+
+	return 0;
 }
 
 int
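
With i_size set, userspace that stats or seeks the debugfs node sees the real
ROM length. A quick standalone check — the /sys/kernel/debug/dri/0 path
assumes DRM minor 0 and debugfs mounted at its default location:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/sys/kernel/debug/dri/0/vbios.rom", &st) == 0)
		printf("vbios.rom: %lld bytes\n", (long long)st.st_size);
	return 0;
}
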
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 945afd3..078f65d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -101,7 +101,7 @@ nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 
 	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
 	if (user->func && user->func->doorbell)
-		user->func->doorbell(user, chan->chid);
+		user->func->doorbell(user, chan->token);
 	chan->dma.ib_free--;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d2928d4..f900e94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -353,6 +353,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		case MAXWELL_CHANNEL_GPFIFO_A:
 		case PASCAL_CHANNEL_GPFIFO_A:
 		case VOLTA_CHANNEL_GPFIFO_A:
+		case TURING_CHANNEL_GPFIFO_A:
 			ret = nvc0_fence_create(drm);
 			break;
 		default:
@@ -370,7 +371,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 		ret = nouveau_channel_new(drm, &drm->client.device,
 					  nvif_fifo_runlist_ce(device), 0,
-					  &drm->cechan);
+					  true, &drm->cechan);
 		if (ret)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
@@ -381,7 +382,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	    device->info.chipset != 0xaa &&
 	    device->info.chipset != 0xac) {
 		ret = nouveau_channel_new(drm, &drm->client.device,
-					  NvDmaFB, NvDmaTT, &drm->cechan);
+					  NvDmaFB, NvDmaTT, false,
+					  &drm->cechan);
 		if (ret)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
@@ -393,7 +395,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	ret = nouveau_channel_new(drm, &drm->client.device,
-				  arg0, arg1, &drm->channel);
+				  arg0, arg1, false, &drm->channel);
 	if (ret) {
 		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
 		nouveau_accel_fini(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0b2191f..d20b9ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,8 +146,6 @@ struct nouveau_drm {
 
 	/* TTM interface support */
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
 		int (*move)(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99be61d..d4964f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -341,7 +341,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 	int ret = 0, i;
 
 	if (!exclusive) {
-		ret = reservation_object_reserve_shared(resv);
+		ret = reservation_object_reserve_shared(resv, 1);
 
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8edb9f2..1543c2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,66 +175,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 static int
-nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void
-nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-int
-nouveau_ttm_global_init(struct nouveau_drm *drm)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &drm->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &nouveau_ttm_mem_global_init;
-	global_ref->release = &nouveau_ttm_mem_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM memory accounting\n");
-		drm->ttm.mem_global_ref.release = NULL;
-		return ret;
-	}
-
-	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
-	global_ref = &drm->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM BO subsystem\n");
-		drm_global_item_unref(&drm->ttm.mem_global_ref);
-		drm->ttm.mem_global_ref.release = NULL;
-		return ret;
-	}
-
-	return 0;
-}
-
-void
-nouveau_ttm_global_release(struct nouveau_drm *drm)
-{
-	if (drm->ttm.mem_global_ref.release == NULL)
-		return;
-
-	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&drm->ttm.mem_global_ref);
-	drm->ttm.mem_global_ref.release = NULL;
-}
-
-static int
 nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
 	struct nvif_mmu *mmu = &drm->client.mmu;
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		drm->agp.cma = pci->agp.cma;
 	}
 
-	ret = nouveau_ttm_global_init(drm);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&drm->ttm.bdev,
-				  drm->ttm.bo_global_ref.ref.object,
 				  &nouveau_bo_driver,
 				  dev->anon_inode->i_mapping,
 				  DRM_FILE_PAGE_OFFSET,
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
 	ttm_bo_device_release(&drm->ttm.bdev);
 
-	nouveau_ttm_global_release(drm);
-
 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
 	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index 7e3b118..ede872f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -25,7 +25,6 @@ void nouveau_vma_unmap(struct nouveau_vma *);
 struct nouveau_vmm {
 	struct nouveau_cli *cli;
 	struct nvif_vmm vmm;
-	struct nvkm_vm *vm;
 };
 
 int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 18c7d06..ef97dd2 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -34,6 +34,7 @@ int
 nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
 {
 	static const struct nvif_mclass disps[] = {
+		{ TU104_DISP, -1 },
 		{ GV100_DISP, -1 },
 		{ GP102_DISP, -1 },
 		{ GP100_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 03f676c..c61b467 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -79,7 +79,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_ENGINE_NVENC0  ] = "nvenc0",
 	[NVKM_ENGINE_NVENC1  ] = "nvenc1",
 	[NVKM_ENGINE_NVENC2  ] = "nvenc2",
-	[NVKM_ENGINE_NVDEC   ] = "nvdec",
+	[NVKM_ENGINE_NVDEC0  ] = "nvdec0",
+	[NVKM_ENGINE_NVDEC1  ] = "nvdec1",
+	[NVKM_ENGINE_NVDEC2  ] = "nvdec2",
 	[NVKM_ENGINE_PM      ] = "pm",
 	[NVKM_ENGINE_SEC     ] = "sec",
 	[NVKM_ENGINE_SEC2    ] = "sec2",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 80d7844..177a233 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -6,3 +6,4 @@
 nvkm-y += nvkm/engine/ce/gp100.o
 nvkm-y += nvkm/engine/ce/gp102.o
 nvkm-y += nvkm/engine/ce/gv100.o
+nvkm-y += nvkm/engine/ce/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
new file mode 100644
index 0000000..3c25043
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu104_ce = {
+	.intr = gp100_ce_intr,
+	.sclass = {
+		{ -1, -1, TURING_DMA_COPY_A },
+		{}
+	}
+};
+
+int
+tu104_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e294013..bfbc934 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2221,7 +2221,7 @@ nv132_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp102_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2257,7 +2257,7 @@ nv134_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp104_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2293,7 +2293,7 @@ nv136_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp104_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2329,7 +2329,7 @@ nv137_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp107_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2365,7 +2365,7 @@ nv138_chipset = {
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
 	.gr = gp107_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
 };
@@ -2430,10 +2430,74 @@ nv140_chipset = {
 	.dma = gv100_dma_new,
 	.fifo = gv100_fifo_new,
 	.gr = gv100_gr_new,
-	.nvdec = gp102_nvdec_new,
+	.nvdec[0] = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv164_chipset = {
+	.name = "TU104",
+	.bar = tu104_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = tu104_devinit_new,
+	.fault = tu104_fault_new,
+	.fb = gv100_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
+	.mc = tu104_mc_new,
+	.mmu = tu104_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = tu104_ce_new,
+	.ce[1] = tu104_ce_new,
+	.ce[2] = tu104_ce_new,
+	.ce[3] = tu104_ce_new,
+	.ce[4] = tu104_ce_new,
+	.disp = tu104_disp_new,
+	.dma = gv100_dma_new,
+	.fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv166_chipset = {
+	.name = "TU106",
+	.bar = tu104_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = tu104_devinit_new,
+	.fault = tu104_fault_new,
+	.fb = gv100_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
+	.mc = tu104_mc_new,
+	.mmu = tu104_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = tu104_ce_new,
+	.ce[1] = tu104_ce_new,
+	.ce[2] = tu104_ce_new,
+	.ce[3] = tu104_ce_new,
+	.ce[4] = tu104_ce_new,
+	.disp = tu104_disp_new,
+	.dma = gv100_dma_new,
+	.fifo = tu104_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		       struct nvkm_notify *notify)
@@ -2529,7 +2593,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
 	_(NVENC0 , device->nvenc[0],  device->nvenc[0]);
 	_(NVENC1 , device->nvenc[1],  device->nvenc[1]);
 	_(NVENC2 , device->nvenc[2],  device->nvenc[2]);
-	_(NVDEC  , device->nvdec   , &device->nvdec->engine);
+	_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
+	_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
+	_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
 	_(PM     , device->pm      , &device->pm->engine);
 	_(SEC    , device->sec     ,  device->sec);
 	_(SEC2   , device->sec2    , &device->sec2->engine);
@@ -2791,6 +2857,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 			case 0x120: device->card_type = GM100; break;
 			case 0x130: device->card_type = GP100; break;
 			case 0x140: device->card_type = GV100; break;
+			case 0x160: device->card_type = TU100; break;
 			default:
 				break;
 			}
@@ -2883,6 +2950,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		case 0x138: device->chip = &nv138_chipset; break;
 		case 0x13b: device->chip = &nv13b_chipset; break;
 		case 0x140: device->chip = &nv140_chipset; break;
+		case 0x164: device->chip = &nv164_chipset; break;
+		case 0x166: device->chip = &nv166_chipset; break;
 		default:
 			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
 			goto done;
@@ -2988,7 +3057,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		_(NVKM_ENGINE_NVENC0  , nvenc[0]);
 		_(NVKM_ENGINE_NVENC1  , nvenc[1]);
 		_(NVKM_ENGINE_NVENC2  , nvenc[2]);
-		_(NVKM_ENGINE_NVDEC   ,    nvdec);
+		_(NVKM_ENGINE_NVDEC0  , nvdec[0]);
+		_(NVKM_ENGINE_NVDEC1  , nvdec[1]);
+		_(NVKM_ENGINE_NVDEC2  , nvdec[2]);
 		_(NVKM_ENGINE_PM      ,       pm);
 		_(NVKM_ENGINE_SEC     ,      sec);
 		_(NVKM_ENGINE_SEC2    ,     sec2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index dde6bba..092ddc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -91,7 +91,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
 	case ENGINE_A(MSENC ); break;
 	case ENGINE_A(VIC   ); break;
 	case ENGINE_A(SEC2  ); break;
-	case ENGINE_A(NVDEC ); break;
+	case ENGINE_B(NVDEC ); break;
 	case ENGINE_B(NVENC ); break;
 	default:
 		args->mthd = NV_DEVICE_INFO_INVALID;
@@ -175,6 +175,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
 	case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
 	case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
+	case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
 	default:
 		args->v0.family = 0;
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 8089ac9..c6a257b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -15,6 +15,7 @@
 nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
+nvkm-y += nvkm/engine/disp/tu104.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
@@ -38,6 +39,7 @@
 nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
+nvkm-y += nvkm/engine/disp/sortu104.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -69,6 +71,7 @@
 nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
+nvkm-y += nvkm/engine/disp/roottu104.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index d0a7e34..47be0ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -28,7 +28,7 @@
 #include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-static int
+int
 gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
 {
 	struct nvkm_device *device = disp->engine.subdev.device;
@@ -36,7 +36,7 @@ gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
 	return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
 }
 
-static void
+void
 gv100_disp_super(struct work_struct *work)
 {
 	struct nv50_disp *disp =
@@ -257,7 +257,7 @@ gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
 	}
 }
 
-static void
+void
 gv100_disp_intr(struct nv50_disp *disp)
 {
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -297,7 +297,7 @@ gv100_disp_intr(struct nv50_disp *disp)
 		nvkm_warn(subdev, "intr %08x\n", stat);
 }
 
-static void
+void
 gv100_disp_fini(struct nv50_disp *disp)
 {
 	struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 0f0c86c..790e42f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -144,6 +144,11 @@ void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
 int gm200_sor_route_get(struct nvkm_outp *, int *);
 void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
 
+void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
+void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
+void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
+void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+
 void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -195,4 +200,6 @@ int gm200_sor_new(struct nvkm_disp *, int);
 
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
+
+int tu104_sor_new(struct nvkm_disp *, int);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 8580382..c36a8a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -78,6 +78,11 @@ void gf119_disp_intr(struct nv50_disp *);
 void gf119_disp_super(struct work_struct *);
 void gf119_disp_intr_error(struct nv50_disp *, int);
 
+void gv100_disp_fini(struct nv50_disp *);
+void gv100_disp_intr(struct nv50_disp *);
+void gv100_disp_super(struct work_struct *);
+int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+
 void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 6ca4f91..97de928 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -37,4 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
 extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
new file mode 100644
index 0000000..ad438c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+tu104_disp_root = {
+	.user = {
+		{{0,0,TU104_DISP_CURSOR                }, gv100_disp_curs_new },
+		{{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+		{{0,0,TU104_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+		{{0,0,TU104_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+		{}
+	},
+};
+
+static int
+tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+tu104_disp_root_oclass = {
+	.base.oclass = TU104_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = tu104_disp_root_new,
+};
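
The .user[] table in tu104_disp_root above maps 32-bit display class identifiers to channel constructors and is terminated by an empty entry; the actual dispatch is handled by the shared nv50 root code. A rough sketch of how such a sentinel-terminated table is walked (simplified types; the class id below is made up):

    #include <stdio.h>

    typedef int (*chan_ctor)(void);

    struct user_class {
        unsigned oclass;  /* 32-bit class id, e.g. TU104_DISP_CURSOR */
        chan_ctor ctor;   /* constructor for that channel type */
    };

    static int curs_new(void) { puts("cursor channel"); return 0; }

    /* Sentinel-terminated, like the .user[] table above. */
    static const struct user_class users[] = {
        { 0xc57a, curs_new },
        { 0, NULL }
    };

    static chan_ctor lookup(const struct user_class *t, unsigned oclass)
    {
        for (; t->ctor; t++)
            if (t->oclass == oclass)
                return t->ctor;
        return NULL; /* unknown class: object creation fails */
    }

    int main(void)
    {
        chan_ctor ctor = lookup(users, 0xc57a);
        return ctor ? ctor() : 1;
    }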
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index 8ba881a..b0597ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -23,7 +23,7 @@
 
 #include <subdev/timer.h>
 
-static void
+void
 gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -31,7 +31,7 @@ gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
 	nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
 }
 
-static void
+void
 gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -40,7 +40,7 @@ gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
 	nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
 }
 
-static void
+void
 gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -54,7 +54,7 @@ gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
 	);
 }
 
-static void
+void
 gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
new file mode 100644
index 0000000..df026a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+		  u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 hoff = head * 0x800;
+
+	nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
+	nvkm_mask(device, 0x616578 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+}
+
+static int
+tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 soff = nv50_ior_base(sor);
+	const u32 loff = nv50_sor_link(sor);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+
+	clksor |= sor->dp.bw << 18;
+	dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+	if (sor->dp.mst)
+		dpctrl |= 0x40000000;
+	if (sor->dp.ef)
+		dpctrl |= 0x00004000;
+
+	nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+	/*XXX*/
+	nvkm_msec(device, 40, NVKM_DELAY);
+	nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+	nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+	nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+	return 0;
+}
+
+static const struct nvkm_ior_func
+tu104_sor = {
+	.route = {
+		.get = gm200_sor_route_get,
+		.set = gm200_sor_route_set,
+	},
+	.state = gv100_sor_state,
+	.power = nv50_sor_power,
+	.clock = gf119_sor_clock,
+	.hdmi = {
+		.ctrl = gv100_hdmi_ctrl,
+	},
+	.dp = {
+		.lanes = { 0, 1, 2, 3 },
+		.links = tu104_sor_dp_links,
+		.power = g94_sor_dp_power,
+		.pattern = gm107_sor_dp_pattern,
+		.drive = gm200_sor_dp_drive,
+		.vcpi = tu104_sor_dp_vcpi,
+		.audio = gv100_sor_dp_audio,
+		.audio_sym = gv100_sor_dp_audio_sym,
+		.watermark = gv100_sor_dp_watermark,
+	},
+	.hda = {
+		.hpd = gf119_hda_hpd,
+		.eld = gf119_hda_eld,
+	},
+};
+
+int
+tu104_sor_new(struct nvkm_disp *disp, int id)
+{
+	return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+}
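
tu104_sor_dp_links() above builds the link-control word from the trained DP parameters: a contiguous lane-enable mask in bits 16-19, bit 30 for MST, bit 14 for enhanced framing. The bit arithmetic in isolation, with no hardware access (the values follow directly from the code above):

    #include <stdio.h>

    static unsigned dpctrl_for(int nr_lanes, int mst, int ef)
    {
        unsigned dpctrl = 0;

        dpctrl |= ((1u << nr_lanes) - 1) << 16; /* 4 lanes -> 0x000f0000 */
        if (mst)
            dpctrl |= 0x40000000;
        if (ef)
            dpctrl |= 0x00004000;
        return dpctrl;
    }

    int main(void)
    {
        printf("%08x\n", dpctrl_for(4, 0, 1)); /* 4-lane SST + EF: 000f4000 */
        printf("%08x\n", dpctrl_for(2, 1, 0)); /* 2-lane MST:      40030000 */
        return 0;
    }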
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
new file mode 100644
index 0000000..13fa214
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
+static int
+tu104_disp_init(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_head *head;
+	int i, j;
+	u32 tmp;
+
+	/* Claim ownership of display. */
+	if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
+		nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* Lock pin capabilities. */
+	tmp = 0x00000021; /*XXX*/
+	nvkm_wr32(device, 0x640008, tmp);
+
+	/* SOR capabilities. */
+	for (i = 0; i < disp->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
+		nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
+	}
+
+	/* Head capabilities. */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const int id = head->id;
+
+		/* RG. */
+		tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
+		nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
+
+		/* POSTCOMP. */
+		for (j = 0; j < 5 * 4; j += 4) {
+			tmp = nvkm_rd32(device, 0x616140 + (id * 0x800) + j);
+			nvkm_wr32(device, 0x640680 + (id * 0x20) + j, tmp);
+		}
+	}
+
+	/* Window capabilities. */
+	for (i = 0; i < disp->wndw.nr; i++) {
+		nvkm_mask(device, 0x640004, 1 << i, 1 << i);
+		for (j = 0; j < 6 * 4; j += 4) {
+			tmp = nvkm_rd32(device, 0x630100 + (i * 0x800) + j);
+			nvkm_mask(device, 0x640780 + (i * 0x20) + j, 0xffffffff, tmp);
+		}
+		nvkm_mask(device, 0x64000c, 0x00000100, 0x00000100);
+	}
+
+	/* IHUB capabilities. */
+	for (i = 0; i < 3; i++) {
+		tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
+		nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
+	}
+
+	nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
+
+	/* Setup instance memory. */
+	switch (nvkm_memory_target(disp->inst->memory)) {
+	case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
+	case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
+	case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
+	default:
+		break;
+	}
+	nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
+	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
+
+	/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
+	nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
+	nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
+
+	/* EXC_OTHER: CURSn, CORE. */
+	nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
+				    0x00000001); /* MSK. */
+	nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
+
+	/* EXC_WINIM. */
+	nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
+	nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
+
+	/* EXC_WIN. */
+	nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
+	nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
+
+	/* HEAD_TIMING(n): VBLANK. */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const u32 hoff = head->id * 4;
+		nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
+		nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
+	}
+
+	/* OR. */
+	nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
+	nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
+	return 0;
+}
+
+static const struct nv50_disp_func
+tu104_disp = {
+	.init = tu104_disp_init,
+	.fini = gv100_disp_fini,
+	.intr = gv100_disp_intr,
+	.uevent = &gv100_disp_chan_uevent,
+	.super = gv100_disp_super,
+	.root = &tu104_disp_root_oclass,
+	.wndw = { .cnt = gv100_disp_wndw_cnt },
+	.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+	.sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+	.ramht_size = 0x2000,
+};
+
+int
+tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+}
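
tu104_disp_init() above leans on the nvkm_msec() idiom: poll a register until a condition holds or a millisecond budget expires, yielding a negative result on timeout. A freestanding approximation of that loop in userspace C, with a stubbed register read standing in for the MMIO access (the real macro is built on the timer subdev):

    #include <stdio.h>
    #include <time.h>

    static unsigned read_status(void) { return 0; /* pretend HW released it */ }

    static long elapsed_ms(const struct timespec *a, const struct timespec *b)
    {
        return (b->tv_sec - a->tv_sec) * 1000 +
               (b->tv_nsec - a->tv_nsec) / 1000000;
    }

    static int wait_released(long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (!(read_status() & 0x00000002))
                return 0;  /* condition met */
            clock_gettime(CLOCK_MONOTONIC, &now);
            if (elapsed_ms(&start, &now) >= timeout_ms)
                return -1; /* timed out; the caller turns this into -EBUSY */
        }
    }

    int main(void)
    {
        printf("%d\n", wait_released(2000));
        return 0;
    }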
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
index 9891180..5d3b641 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
@@ -118,7 +118,7 @@ gv100_disp_wndw_mthd_base = {
 
 const struct nv50_disp_chan_mthd
 gv100_disp_wndw_mthd = {
-	.name = "Base",
+	.name = "Window",
 	.addr = 0x001000,
 	.prev = 0x000800,
 	.data = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index f004085..87d8e054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -16,6 +16,7 @@
 nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu104.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -33,5 +34,7 @@
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
 nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+nvkm-y += nvkm/engine/fifo/gpfifotu104.o
 
 nvkm-y += nvkm/engine/fifo/usergv100.o
+nvkm-y += nvkm/engine/fifo/usertu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 3ffef23..2c7c5af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,6 +17,7 @@ struct nvkm_fifo_chan_func {
 			    bool suspend);
 	int  (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
 	void (*object_dtor)(struct nvkm_fifo_chan *, int);
+	u32 (*submit_token)(struct nvkm_fifo_chan *);
 };
 
 int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 8e28ba6..a14545d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
 	struct list_head head;
 	bool killed;
 
+	struct nvkm_memory *mthd;
+
 	struct {
 		struct nvkm_gpuobj *inst;
 		struct nvkm_vma *vma;
@@ -36,4 +38,15 @@ int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
 
 int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
 			  void *data, u32 size, struct nvkm_object **);
+int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
+			   struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
+			   u64 *, bool, u32 *, const struct nvkm_oclass *,
+			   struct nvkm_object **);
+int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
+				  struct nvkm_engine *);
+int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
+				  struct nvkm_engine *, bool);
+
+int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+			  void *data, u32 size, struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index f695768..10a2e70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -346,10 +346,10 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 	if (eu && eu->data2) {
 		switch (eu->data2) {
 		case NVKM_SUBDEV_BAR:
-			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			nvkm_bar_bar1_reset(device);
 			break;
 		case NVKM_SUBDEV_INSTMEM:
-			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			nvkm_bar_bar2_reset(device);
 			break;
 		case NVKM_ENGINE_IFB:
 			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index afccf97..1053fe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -149,16 +149,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 }
 
 void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+			  struct nvkm_memory *mem, int nr)
+{
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int target;
+
+	switch (nvkm_memory_target(mem)) {
+	case NVKM_MEM_TARGET_VRAM: target = 0; break;
+	case NVKM_MEM_TARGET_NCOH: target = 3; break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+				    (target << 28));
+	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+			break;
+	) < 0)
+		nvkm_error(subdev, "runlist %d update timeout\n", runl);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
 {
 	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
 	struct gk104_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
 	struct nvkm_memory *mem;
 	struct nvkm_fifo_cgrp *cgrp;
 	int nr = 0;
-	int target;
 
 	mutex_lock(&subdev->mutex);
 	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
@@ -177,24 +202,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	}
 	nvkm_done(mem);
 
-	switch (nvkm_memory_target(mem)) {
-	case NVKM_MEM_TARGET_VRAM: target = 0; break;
-	case NVKM_MEM_TARGET_NCOH: target = 3; break;
-	default:
-		WARN_ON(1);
-		goto unlock;
-	}
-
-	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
-				    (target << 28));
-	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
-
-	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
-			break;
-	) < 0)
-		nvkm_error(subdev, "runlist %d update timeout\n", runl);
-unlock:
+	func->commit(fifo, runl, mem, nr);
 	mutex_unlock(&subdev->mutex);
 }
 
@@ -238,6 +246,29 @@ const struct gk104_fifo_runlist_func
 gk104_fifo_runlist = {
 	.size = 8,
 	.chan = gk104_fifo_runlist_chan,
+	.commit = gk104_fifo_runlist_commit,
+};
+
+void
+gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+}
+
+int
+gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	/* Determine number of PBDMAs by checking valid enable bits. */
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	return hweight32(nvkm_rd32(device, 0x000204));
+}
+
+const struct gk104_fifo_pbdma_func
+gk104_fifo_pbdma = {
+	.nr = gk104_fifo_pbdma_nr,
+	.init = gk104_fifo_pbdma_init,
 };
 
 static void
@@ -267,7 +298,7 @@ gk104_fifo_recover_work(struct work_struct *w)
 	}
 
 	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
-		gk104_fifo_runlist_commit(fifo, runl);
+		gk104_fifo_runlist_update(fifo, runl);
 
 	nvkm_wr32(device, 0x00262c, runm);
 	nvkm_mask(device, 0x002630, runm, 0x00000000);
@@ -456,10 +487,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
 	if (ee && ee->data2) {
 		switch (ee->data2) {
 		case NVKM_SUBDEV_BAR:
-			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			nvkm_bar_bar1_reset(device);
 			break;
 		case NVKM_SUBDEV_INSTMEM:
-			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			nvkm_bar_bar2_reset(device);
 			break;
 		case NVKM_ENGINE_IFB:
 			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
@@ -904,9 +935,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	enum nvkm_devidx engidx;
 	u32 *map;
 
-	/* Determine number of PBDMAs by checking valid enable bits. */
-	nvkm_wr32(device, 0x000204, 0xffffffff);
-	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
 	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
 
 	/* Read PBDMA->runlist(s) mapping from HW. */
@@ -978,7 +1007,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
 	int i;
 
 	/* Enable PBDMAs. */
-	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+	fifo->func->pbdma->init(fifo);
 
 	/* PBDMA[n] */
 	for (i = 0; i < fifo->pbdma_nr; i++) {
@@ -995,8 +1024,8 @@ gk104_fifo_init(struct nvkm_fifo *base)
 
 	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
-	if (fifo->func->init_pbdma_timeout)
-		fifo->func->init_pbdma_timeout(fifo);
+	if (fifo->func->pbdma->init_timeout)
+		fifo->func->pbdma->init_timeout(fifo);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -1175,6 +1204,7 @@ gk104_fifo_fault_gpcclient[] = {
 
 static const struct gk104_fifo_func
 gk104_fifo = {
+	.pbdma = &gk104_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
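
The refactor above splits runlist handling in two: the generation-independent part, building the list into the inactive half of a double buffer, stays in gk104_fifo_runlist_update(), while the final register write moves behind a per-chipset ->commit() hook, since Turing programs a different register block (see fifo/tu104.c below). A condensed sketch of the resulting shape, with simplified stand-in types:

    #include <stdio.h>

    struct runlist {
        unsigned mem[2][8];  /* double-buffered runlist storage */
        int next;            /* which buffer to build into */
        void (*commit)(const unsigned *mem, int nr); /* per-chip hook */
    };

    static void kepler_commit(const unsigned *mem, int nr)
    {
        (void)mem;
        printf("kepler-style commit: %d entries\n", nr);
    }

    static void update(struct runlist *rl, const unsigned *chans, int nr)
    {
        unsigned *mem = rl->mem[rl->next];
        int i;

        rl->next = !rl->next;
        for (i = 0; i < nr; i++)
            mem[i] = chans[i];   /* write the channel entries */
        rl->commit(mem, nr);     /* chip-specific register write */
    }

    int main(void)
    {
        struct runlist rl = { .commit = kepler_commit };
        const unsigned chans[] = { 3, 5 };
        update(&rl, chans, 2);
        return 0;
    }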
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index d295b81..d4e5656 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,7 +45,11 @@ struct gk104_fifo {
 };
 
 struct gk104_fifo_func {
-	void (*init_pbdma_timeout)(struct gk104_fifo *);
+	const struct gk104_fifo_pbdma_func {
+		int (*nr)(struct gk104_fifo *);
+		void (*init)(struct gk104_fifo *);
+		void (*init_timeout)(struct gk104_fifo *);
+	} *pbdma;
 
 	struct {
 		const struct nvkm_enum *access;
@@ -61,6 +65,8 @@ struct gk104_fifo_func {
 			     struct nvkm_memory *, u32 offset);
 		void (*chan)(struct gk104_fifo_chan *,
 			     struct nvkm_memory *, u32 offset);
+		void (*commit)(struct gk104_fifo *, int runl,
+			       struct nvkm_memory *, int entries);
 	} *runlist;
 
 	struct gk104_fifo_user_user {
@@ -81,8 +87,11 @@ int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
 		    int index, int nr, struct nvkm_fifo **);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
+void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
 
+extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
+int gk104_fifo_pbdma_nr(struct gk104_fifo *);
+void gk104_fifo_pbdma_init(struct gk104_fifo *);
 extern const struct nvkm_enum gk104_fifo_fault_access[];
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
@@ -91,15 +100,30 @@ extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
 extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
 void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
 			     struct nvkm_memory *, u32);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
+			       struct nvkm_memory *, int);
 
 extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
 void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
 			     struct nvkm_memory *, u32);
 
-void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
+extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
+void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
 
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
 extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
 
+extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
+int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
+
+extern const struct nvkm_enum gv100_fifo_fault_access[];
+extern const struct nvkm_enum gv100_fifo_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_fault_gpcclient[];
+void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+			     struct nvkm_memory *, u32);
+void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
+			     struct nvkm_memory *, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index ac7655a1..8adfa6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -43,10 +43,12 @@ gk110_fifo_runlist = {
 	.size = 8,
 	.cgrp = gk110_fifo_runlist_cgrp,
 	.chan = gk104_fifo_runlist_chan,
+	.commit = gk104_fifo_runlist_commit,
 };
 
 static const struct gk104_fifo_func
 gk110_fifo = {
+	.pbdma = &gk104_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 5ea7e45..9553fb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 void
-gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
+gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
@@ -36,9 +36,16 @@ gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
 		nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
 }
 
+const struct gk104_fifo_pbdma_func
+gk208_fifo_pbdma = {
+	.nr = gk104_fifo_pbdma_nr,
+	.init = gk104_fifo_pbdma_init,
+	.init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
 static const struct gk104_fifo_func
 gk208_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gk208_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 535a0eb..a4c6ac3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gk20a_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gk208_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 79ae19b..acf2307 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -41,6 +41,7 @@ gm107_fifo_runlist = {
 	.size = 8,
 	.cgrp = gk110_fifo_runlist_cgrp,
 	.chan = gm107_fifo_runlist_chan,
+	.commit = gk104_fifo_runlist_commit,
 };
 
 const struct nvkm_enum
@@ -68,7 +69,7 @@ gm107_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gm107_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gk208_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 49565fa..b96c1c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -26,9 +26,23 @@
 
 #include <nvif/class.h>
 
+int
+gm200_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	return nvkm_rd32(device, 0x002004) & 0x000000ff;
+}
+
+const struct gk104_fifo_pbdma_func
+gm200_fifo_pbdma = {
+	.nr = gm200_fifo_pbdma_nr,
+	.init = gk104_fifo_pbdma_init,
+	.init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
 static const struct gk104_fifo_func
 gm200_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gm200_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
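
gm200_fifo_pbdma_nr() can read the unit count directly, whereas gk104_fifo_pbdma_nr() has to probe: write all-ones to the enable mask and popcount whatever the hardware lets stick (hweight32() is the kernel's 32-bit popcount). Both strategies side by side, with plain variables standing in for the MMIO registers:

    #include <stdio.h>

    static unsigned reg_000204 = 0x00000007; /* enable mask: 3 valid bits */
    static unsigned reg_002004 = 0x00000003; /* GM200+ count register */

    static int gk104_style_nr(void)
    {
        reg_000204 = 0xffffffff;   /* try to enable everything... */
        reg_000204 &= 0x00000007;  /* ...HW keeps only the bits that exist */
        return __builtin_popcount(reg_000204); /* hweight32() equivalent */
    }

    static int gm200_style_nr(void)
    {
        return reg_002004 & 0x000000ff;
    }

    int main(void)
    {
        printf("gk104-style: %d, gm200-style: %d\n",
               gk104_style_nr(), gm200_style_nr());
        return 0;
    }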
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 46736513..a49539b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gm20b_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gm200_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index e2f8f90..54377e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -52,7 +52,7 @@ gp100_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gp100_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gm200_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 7733bf7..778ba7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gp10b_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gm200_fifo_pbdma,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 118b37a..728a1ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -85,7 +85,7 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 	case NVKM_ENGINE_MSVLD : return 0x0270;
 	case NVKM_ENGINE_VIC   : return 0x0280;
 	case NVKM_ENGINE_MSENC : return 0x0290;
-	case NVKM_ENGINE_NVDEC : return 0x02100270;
+	case NVKM_ENGINE_NVDEC0: return 0x02100270;
 	case NVKM_ENGINE_NVENC0: return 0x02100290;
 	case NVKM_ENGINE_NVENC1: return 0x0210;
 	default:
@@ -192,7 +192,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 		gk104_fifo_runlist_remove(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
 		gk104_fifo_gpfifo_kick(chan);
-		gk104_fifo_runlist_commit(fifo, chan->runl);
+		gk104_fifo_runlist_update(fifo, chan->runl);
 	}
 
 	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -213,7 +213,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 	if (list_empty(&chan->head) && !chan->killed) {
 		gk104_fifo_runlist_insert(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
-		gk104_fifo_runlist_commit(fifo, chan->runl);
+		gk104_fifo_runlist_update(fifo, chan->runl);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
 	}
 }
@@ -222,6 +222,7 @@ void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_memory_unref(&chan->mthd);
 	kfree(chan->cgrp);
 	return chan;
 }
@@ -240,7 +241,7 @@ gk104_fifo_gpfifo_func = {
 
 static int
 gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
-		       u64 vmm, u64 ioffset, u64 ilength,
+		       u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
 		       const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
@@ -279,6 +280,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 		return ret;
 
 	*chid = chan->base.chid;
+	*inst = chan->base.inst->addr;
 
 	/* Hack to support GPUs where even individual channels should be
 	 * part of a channel group.
@@ -315,6 +317,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
 	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
 	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
 	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
 	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
 	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
@@ -337,15 +340,19 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "ioffset %016llx ilength %08x "
-				   "runlist %016llx\n",
+				   "runlist %016llx priv %d\n",
 			   args->v0.version, args->v0.vmm, args->v0.ioffset,
-			   args->v0.ilength, args->v0.runlist);
+			   args->v0.ilength, args->v0.runlist, args->v0.priv);
+		if (args->v0.priv && !oclass->client->super)
+			return -EINVAL;
 		return gk104_fifo_gpfifo_new_(fifo,
 					      &args->v0.runlist,
 					      &args->v0.chid,
 					       args->v0.vmm,
 					       args->v0.ioffset,
 					       args->v0.ilength,
+					      &args->v0.inst,
+					       args->v0.priv,
 					      oclass, pobject);
 	}
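
The new "priv" handling above gates privileged channel creation on the client's super flag and then folds the decision into the RAMFC word at offset 0xe4. The control flow in miniature (simplified types; -EINVAL mirrors the check above):

    #include <errno.h>
    #include <stdio.h>

    struct client { int super; };

    static int chan_priv_word(const struct client *c, int priv, unsigned *e4)
    {
        if (priv && !c->super)
            return -EINVAL;  /* unprivileged caller asked for priv */
        *e4 = priv ? 0x00000020 : 0x00000000;
        return 0;
    }

    int main(void)
    {
        struct client user = { .super = 0 };
        unsigned word;

        printf("%d\n", chan_priv_word(&user, 1, &word)); /* -22 */
        return 0;
    }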
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index 9598853..a7462cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -25,9 +25,15 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 
-#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
 #include <nvif/unpack.h>
 
+static u32
+gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
+{
+	return chan->chid;
+}
+
 static int
 gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
 {
@@ -56,7 +62,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
 	return ret;
 }
 
-static int
+int
 gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine, bool suspend)
 {
@@ -79,7 +85,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	return ret;
 }
 
-static int
+int
 gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
@@ -100,8 +106,8 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	return gv100_fifo_gpfifo_engine_valid(chan, false, true);
 }
 
-const struct nvkm_fifo_chan_func
-gv100_fifo_gpfifo_func = {
+static const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo = {
 	.dtor = gk104_fifo_gpfifo_dtor,
 	.init = gk104_fifo_gpfifo_init,
 	.fini = gk104_fifo_gpfifo_fini,
@@ -110,19 +116,23 @@ gv100_fifo_gpfifo_func = {
 	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
 	.engine_init = gv100_fifo_gpfifo_engine_init,
 	.engine_fini = gv100_fifo_gpfifo_engine_fini,
+	.submit_token = gv100_fifo_gpfifo_submit_token,
 };
 
-static int
-gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
-		       u64 vmm, u64 ioffset, u64 ilength,
-		       const struct nvkm_oclass *oclass,
+int
+gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
+		       struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+		       u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+		       u32 *token, const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct gk104_fifo_chan *chan;
 	int runlist = ffs(*runlists) - 1, ret, i;
 	unsigned long engm;
 	u64 subdevs = 0;
-	u64 usermem;
+	u64 usermem, mthd;
+	u32 size;
 
 	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
 		return -EINVAL;
@@ -142,14 +152,15 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 	chan->runl = runlist;
 	INIT_LIST_HEAD(&chan->head);
 
-	ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
-				  0x1000, 0x1000, true, vmm, 0, subdevs,
-				  1, fifo->user.bar->addr, 0x200,
+	ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
+				  0, subdevs, 1, fifo->user.bar->addr, 0x200,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;
 
 	*chid = chan->base.chid;
+	*inst = chan->base.inst->addr;
+	*token = chan->base.func->submit_token(&chan->base);
 
 	/* Hack to support GPUs where even individual channels should be
 	 * part of a channel group.
@@ -173,6 +184,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 	nvkm_done(fifo->user.mem);
 	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
+	/* Allocate fault method buffer (magics come from nvgpu). */
+	size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+	size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+	size = roundup(size, PAGE_SIZE);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+			      &chan->mthd);
+	if (ret)
+		return ret;
+
+	mthd = nvkm_memory_bar2(chan->mthd);
+	if (mthd == ~0ULL)
+		return -EFAULT;
+
 	/* RAMFC */
 	nvkm_kmap(chan->base.inst);
 	nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -184,13 +209,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 					  (ilength << 16));
 	nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
 	nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
-	nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
+	nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
 	nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
-	nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
+	nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
 	nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
 	nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
-	nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
-	nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+	nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+	nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
 	nvkm_done(chan->base.inst);
 	return gv100_fifo_gpfifo_engine_valid(chan, true, true);
 }
@@ -201,7 +226,7 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 {
 	struct nvkm_object *parent = oclass->parent;
 	union {
-		struct kepler_channel_gpfifo_a_v0 v0;
+		struct volta_channel_gpfifo_a_v0 v0;
 	} *args = data;
 	int ret = -ENOSYS;
 
@@ -209,15 +234,20 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "ioffset %016llx ilength %08x "
-				   "runlist %016llx\n",
+				   "runlist %016llx priv %d\n",
 			   args->v0.version, args->v0.vmm, args->v0.ioffset,
-			   args->v0.ilength, args->v0.runlist);
-		return gv100_fifo_gpfifo_new_(fifo,
+			   args->v0.ilength, args->v0.runlist, args->v0.priv);
+		if (args->v0.priv && !oclass->client->super)
+			return -EINVAL;
+		return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
 					      &args->v0.runlist,
 					      &args->v0.chid,
 					       args->v0.vmm,
 					       args->v0.ioffset,
 					       args->v0.ilength,
+					      &args->v0.inst,
+					       args->v0.priv,
+					      &args->v0.token,
 					      oclass, pobject);
 	}
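
The fault-method-buffer sizing above is worth unpacking, since the constants are opaque (the code itself credits nvgpu for them). For a hypothetical NV_PCE_PCE_MAP with two bits set: 27 * 5 * (((9 + 1 + 3) * 2) + 2) = 135 * 28 = 3780 bytes, which rounds up to one 4 KiB page. As a checkable sketch:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned mthd_buf_size(unsigned pce_map)
    {
        unsigned nce = __builtin_popcount(pce_map); /* hweight32() */
        unsigned size = 27 * 5 * (((9 + 1 + 3) * nce) + 2);

        return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); /* roundup() */
    }

    int main(void)
    {
        printf("%u\n", mthd_buf_size(0x3)); /* 4096 */
        return 0;
    }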
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
new file mode 100644
index 0000000..ff70484d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
+static u32
+tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	return (chan->runl << 16) | chan->base.chid;
+}
+
+static const struct nvkm_fifo_chan_func
+tu104_fifo_gpfifo = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = gf100_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gv100_fifo_gpfifo_engine_init,
+	.engine_fini = gv100_fifo_gpfifo_engine_fini,
+	.submit_token = tu104_fifo_gpfifo_submit_token,
+};
+
+int
+tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct volta_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	int ret = -ENOSYS;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+				   "ioffset %016llx ilength %08x "
+				   "runlist %016llx priv %d\n",
+			   args->v0.version, args->v0.vmm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.runlist, args->v0.priv);
+		if (args->v0.priv && !oclass->client->super)
+			return -EINVAL;
+		return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+					      &args->v0.runlist,
+					      &args->v0.chid,
+					       args->v0.vmm,
+					       args->v0.ioffset,
+					       args->v0.ilength,
+					      &args->v0.inst,
+					       args->v0.priv,
+					      &args->v0.token,
+					      oclass, pobject);
+	}
+
+	return ret;
+}
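
The only behavioural difference from the gv100 gpfifo code is the submit token: Volta hands userspace the bare channel ID, while Turing packs the runlist index into the upper 16 bits so the doorbell write can be routed. Encoding and decoding in isolation:

    #include <stdio.h>

    static unsigned tu104_token(unsigned runl, unsigned chid)
    {
        return (runl << 16) | chid; /* gv100 would return chid alone */
    }

    int main(void)
    {
        unsigned tok = tu104_token(2, 0x2a);

        printf("token %08x -> runl %u chid %u\n",
               tok, tok >> 16, tok & 0xffff);
        return 0;
    }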
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 4e1d159..6ee1bb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -28,7 +28,7 @@
 
 #include <nvif/class.h>
 
-static void
+void
 gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
 			struct nvkm_memory *memory, u32 offset)
 {
@@ -42,7 +42,7 @@ gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
 	nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
 }
 
-static void
+void
 gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
 			struct nvkm_memory *memory, u32 offset)
 {
@@ -57,9 +57,10 @@ gv100_fifo_runlist = {
 	.size = 16,
 	.cgrp = gv100_fifo_runlist_cgrp,
 	.chan = gv100_fifo_runlist_chan,
+	.commit = gk104_fifo_runlist_commit,
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_gpcclient[] = {
 	{ 0x00, "T1_0" },
 	{ 0x01, "T1_1" },
@@ -161,7 +162,7 @@ gv100_fifo_fault_gpcclient[] = {
 	{}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_hubclient[] = {
 	{ 0x00, "VIP" },
 	{ 0x01, "CE0" },
@@ -223,7 +224,7 @@ gv100_fifo_fault_hubclient[] = {
 	{}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_reason[] = {
 	{ 0x00, "PDE" },
 	{ 0x01, "PDE_SIZE" },
@@ -271,7 +272,7 @@ gv100_fifo_fault_engine[] = {
 	{}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_access[] = {
 	{ 0x0, "VIRT_READ" },
 	{ 0x1, "VIRT_WRITE" },
@@ -287,7 +288,7 @@ gv100_fifo_fault_access[] = {
 
 static const struct gk104_fifo_func
 gv100_fifo = {
-	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.pbdma = &gm200_fifo_pbdma,
 	.fault.access = gv100_fifo_fault_access,
 	.fault.engine = gv100_fifo_fault_engine,
 	.fault.reason = gv100_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
new file mode 100644
index 0000000..98c8070
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+			  struct nvkm_memory *mem, int nr)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = nvkm_memory_addr(mem);
+	/*XXX: target? */
+
+	nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
+	nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
+	nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
+
+	/*XXX: how to wait? can you even wait? */
+}
+
+const struct gk104_fifo_runlist_func
+tu104_fifo_runlist = {
+	.size = 16,
+	.cgrp = gv100_fifo_runlist_cgrp,
+	.chan = gv100_fifo_runlist_chan,
+	.commit = tu104_fifo_runlist_commit,
+};
+
+static const struct nvkm_enum
+tu104_fifo_fault_engine[] = {
+	{ 0x01, "DISPLAY" },
+	{ 0x03, "PTP" },
+	{ 0x06, "PWR_PMU" },
+	{ 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x09, "PERF" },
+	{ 0x1f, "PHYSICAL" },
+	{ 0x20, "HOST0" },
+	{ 0x21, "HOST1" },
+	{ 0x22, "HOST2" },
+	{ 0x23, "HOST3" },
+	{ 0x24, "HOST4" },
+	{ 0x25, "HOST5" },
+	{ 0x26, "HOST6" },
+	{ 0x27, "HOST7" },
+	{ 0x28, "HOST8" },
+	{ 0x29, "HOST9" },
+	{ 0x2a, "HOST10" },
+	{ 0x2b, "HOST11" },
+	{ 0x2c, "HOST12" },
+	{ 0x2d, "HOST13" },
+	{ 0x2e, "HOST14" },
+	{ 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+	{}
+};
+
+static void
+tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	const u32 mask = (1 << fifo->pbdma_nr) - 1;
+	/*XXX: this is a bit of a guess at this point in time. */
+	nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
+}
+
+static const struct gk104_fifo_pbdma_func
+tu104_fifo_pbdma = {
+	.nr = gm200_fifo_pbdma_nr,
+	.init = tu104_fifo_pbdma_init,
+	.init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+tu104_fifo = {
+	.pbdma = &tu104_fifo_pbdma,
+	.fault.access = gv100_fifo_fault_access,
+	.fault.engine = tu104_fifo_fault_engine,
+	.fault.reason = gv100_fifo_fault_reason,
+	.fault.hubclient = gv100_fifo_fault_hubclient,
+	.fault.gpcclient = gv100_fifo_fault_gpcclient,
+	.runlist = &tu104_fifo_runlist,
+	.user = {{-1,-1,VOLTA_USERMODE_A       }, tu104_fifo_user_new   },
+	.chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+	.cgrp_force = true,
+};
+
+int
+tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
index ed84092..14b0c6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -3,4 +3,6 @@
 #include "priv.h"
 int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
 			struct nvkm_object **);
+int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+			struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
new file mode 100644
index 0000000..8f98548
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+		    enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+	struct nvkm_device *device = object->engine->subdev.device;
+	*addr = 0xbb0000 + device->func->resource_addr(device, 0);
+	*size = 0x010000;
+	*type = NVKM_OBJECT_MAP_IO;
+	return 0;
+}
+
+static const struct nvkm_object_func
+tu104_fifo_user = {
+	.map = tu104_fifo_user_map,
+};
+
+int
+tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
+{
+	return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 14be41f..4273401 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -197,7 +197,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 	case NVKM_SUBDEV_PMU:
 		debug_reg = 0xc08;
 		break;
-	case NVKM_ENGINE_NVDEC:
+	case NVKM_ENGINE_NVDEC0:
 		debug_reg = 0xd00;
 		break;
 	case NVKM_ENGINE_SEC2:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index e583045..ab0282d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -5,3 +5,4 @@
 nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
+nvkm-y += nvkm/subdev/bar/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 243f0a5..209a6a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -36,6 +36,16 @@ nvkm_bar_bar1_vmm(struct nvkm_device *device)
 	return device->bar->func->bar1.vmm(device->bar);
 }
 
+void
+nvkm_bar_bar1_reset(struct nvkm_device *device)
+{
+	struct nvkm_bar *bar = device->bar;
+	if (bar) {
+		bar->func->bar1.init(bar);
+		bar->func->bar1.wait(bar);
+	}
+}
+
 struct nvkm_vmm *
 nvkm_bar_bar2_vmm(struct nvkm_device *device)
 {
@@ -49,6 +59,16 @@ nvkm_bar_bar2_vmm(struct nvkm_device *device)
 }
 
 void
+nvkm_bar_bar2_reset(struct nvkm_device *device)
+{
+	struct nvkm_bar *bar = device->bar;
+	if (bar && bar->bar2) {
+		bar->func->bar2.init(bar);
+		bar->func->bar2.wait(bar);
+	}
+}
+
+void
 nvkm_bar_bar2_fini(struct nvkm_device *device)
 {
 	struct nvkm_bar *bar = device->bar;
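
These helpers let fault handlers ask for a BAR rebind without knowing chip-specific registers: the init/wait pair is re-run only if that BAR actually exists on the device. The shape of the null-guarded dispatch, with simplified stand-in types:

    #include <stdio.h>

    struct bar {
        int has_bar2;
        void (*init)(void);
        void (*wait)(void);
    };

    static void bar2_reset(struct bar *bar)
    {
        if (bar && bar->has_bar2) {
            bar->init(); /* re-point HW at the instance block */
            bar->wait(); /* poll until the bind completes */
        }
    }

    static void my_init(void) { puts("init"); }
    static void my_wait(void) { puts("wait"); }

    int main(void)
    {
        struct bar b = { 1, my_init, my_wait };
        bar2_reset(&b);
        return 0;
    }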
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
new file mode 100644
index 0000000..ecaead1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+static void
+tu104_bar_bar2_wait(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0xb80f50) & 0x0000000c))
+			break;
+	);
+}
+
+static void
+tu104_bar_bar2_fini(struct nvkm_bar *bar)
+{
+	nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar2_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
+	struct gf100_bar *bar = gf100_bar(base);
+	u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+	if (bar->bar2_halve)
+		addr |= 0x40000000;
+	nvkm_wr32(device, 0xb80f48, 0x80000000 | addr);
+}
+
+static void
+tu104_bar_bar1_wait(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0xb80f50) & 0x00000003))
+			break;
+	);
+}
+
+static void
+tu104_bar_bar1_fini(struct nvkm_bar *bar)
+{
+	nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar1_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
+	struct gf100_bar *bar = gf100_bar(base);
+	const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+	nvkm_wr32(device, 0xb80f40, 0x80000000 | addr);
+}
+
+static const struct nvkm_bar_func
+tu104_bar = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.bar1.init = tu104_bar_bar1_init,
+	.bar1.fini = tu104_bar_bar1_fini,
+	.bar1.wait = tu104_bar_bar1_wait,
+	.bar1.vmm = gf100_bar_bar1_vmm,
+	.bar2.init = tu104_bar_bar2_init,
+	.bar2.fini = tu104_bar_bar2_fini,
+	.bar2.wait = tu104_bar_bar2_wait,
+	.bar2.vmm = gf100_bar_bar2_vmm,
+	.flush = g84_bar_flush,
+};
+
+int
+tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return gf100_bar_new_(&tu104_bar, device, index, pbar);
+}
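
Turing moves BAR control into the 0xb80fxx block, but the value encoding is familiar: instance-block address in 4 KiB units, bit 31 as the enable, bit 30 selecting the halved-BAR2 mode. A worked example of the composition (the address is made up):

    #include <stdio.h>

    static unsigned bar2_ctrl(unsigned long long inst_addr, int halve)
    {
        unsigned v = (unsigned)(inst_addr >> 12); /* 4 KiB units */

        if (halve)
            v |= 0x40000000;
        return 0x80000000 | v; /* bit 31: BAR enabled */
    }

    int main(void)
    {
        /* inst at 0x1f000000 -> 8001f000, or c001f000 when halved */
        printf("%08x\n", bar2_ctrl(0x1f000000ull, 0));
        printf("%08x\n", bar2_ctrl(0x1f000000ull, 1));
        return 0;
    }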
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 50a4369..3ef505a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -13,3 +13,4 @@
 nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
+nvkm-y += nvkm/subdev/devinit/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 17235e9..59940da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -105,6 +105,15 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
 	return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
+void
+gm200_devinit_preos(struct nv50_devinit *init, bool post)
+{
+	/* Optional: Execute PRE_OS application on PMU, which should at
+	 * least take care of fans until a full PMU has been loaded.
+	 */
+	pmu_load(init, 0x01, post, NULL, NULL);
+}
+
 int
 gm200_devinit_post(struct nvkm_devinit *base, bool post)
 {
@@ -156,10 +165,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
 			return -ETIMEDOUT;
 	}
 
-	/* Optional: Execute PRE_OS application on PMU, which should at
-	 * least take care of fans until a full PMU has been loaded.
-	 */
-	pmu_load(init, 0x01, post, NULL, NULL);
+	gm200_devinit_preos(init, post);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9b9f0dc..72d130b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -26,4 +26,5 @@ void gf100_devinit_preinit(struct nvkm_devinit *);
 u64  gm107_devinit_disable(struct nvkm_devinit *);
 
 int gm200_devinit_post(struct nvkm_devinit *, bool);
+void gm200_devinit_preos(struct nv50_devinit *, bool);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
new file mode 100644
index 0000000..aae87b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvbios_pll info;
+	int head = type - PLL_VPLL0;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(device->bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+	case PLL_VPLL2:
+	case PLL_VPLL3:
+		nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
+		nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
+							    (N <<  8) |
+							    (M <<  0));
+		/*XXX*/
+		nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
+		nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
+		break;
+	default:
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+tu104_devinit_post(struct nvkm_devinit *base, bool post)
+{
+	struct nv50_devinit *init = nv50_devinit(base);
+	gm200_devinit_preos(init, post);
+	return 0;
+}
+
+static const struct nvkm_devinit_func
+tu104_devinit = {
+	.init = nv50_devinit_init,
+	.post = tu104_devinit_post,
+	.pll_set = tu104_devinit_pll_set,
+	.disable = gm107_devinit_disable,
+};
+
+int
+tu104_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 45bb46f..794eb17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,3 +1,4 @@
 nvkm-y += nvkm/subdev/fault/base.o
 nvkm-y += nvkm/subdev/fault/gp100.o
 nvkm-y += nvkm/subdev/fault/gv100.o
+nvkm-y += nvkm/subdev/fault/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 16ad91c..4ba1e21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -23,21 +23,19 @@
 
 #include <core/memory.h>
 #include <core/notify.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
 
 static void
 nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
-	fault->func->buffer.fini(fault->buffer[index]);
+	fault->func->buffer.intr(fault->buffer[index], false);
 }
 
 static void
 nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
-	fault->func->buffer.init(fault->buffer[index]);
+	fault->func->buffer.intr(fault->buffer[index], true);
 }
 
 static int
@@ -91,7 +89,6 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
 {
 	struct nvkm_subdev *subdev = &fault->subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
 	struct nvkm_fault_buffer *buffer;
 	int ret;
 
@@ -99,7 +96,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
 		return -ENOMEM;
 	buffer->fault = fault;
 	buffer->id = id;
-	buffer->entries = fault->func->buffer.entries(buffer);
+	fault->func->buffer.info(buffer);
 	fault->buffer[id] = buffer;
 
 	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
@@ -110,12 +107,12 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
 	if (ret)
 		return ret;
 
-	ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
-			   &buffer->vma);
-	if (ret)
-		return ret;
+	/* Pin fault buffer in BAR2. */
+	buffer->addr = nvkm_memory_bar2(buffer->mem);
+	if (buffer->addr == ~0ULL)
+		return -EFAULT;
 
-	return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
+	return 0;
 }
 
 static int
@@ -146,7 +143,6 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
 static void *
 nvkm_fault_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
 	struct nvkm_fault *fault = nvkm_fault(subdev);
 	int i;
 
@@ -154,7 +150,6 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
 
 	for (i = 0; i < fault->buffer_nr; i++) {
 		if (fault->buffer[i]) {
-			nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
 			nvkm_memory_unref(&fault->buffer[i]->mem);
 			kfree(fault->buffer[i]);
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 5e71db2..8fb96fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,7 +21,14 @@
  */
 #include "priv.h"
 
-#include <subdev/mmu.h>
+#include <subdev/mc.h>
+
+static void
+gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+}
 
 static void
 gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
@@ -34,15 +41,17 @@ static void
 gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
-	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
-	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
+	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
+	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
 	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
 }
 
-static u32
-gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
-	return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+	buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+	buffer->get = 0x002a7c;
+	buffer->put = 0x002a80;
 }
 
 static void
@@ -56,9 +65,10 @@ gp100_fault = {
 	.intr = gp100_fault_intr,
 	.buffer.nr = 1,
 	.buffer.entry_size = 32,
-	.buffer.entries = gp100_fault_buffer_entries,
+	.buffer.info = gp100_fault_buffer_info,
 	.buffer.init = gp100_fault_buffer_init,
 	.buffer.fini = gp100_fault_buffer_fini,
+	.buffer.intr = gp100_fault_buffer_intr,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 3cd610d..6fc54e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -30,9 +30,8 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	struct nvkm_memory *mem = buffer->mem;
-	const u32 foff = buffer->id * 0x14;
-	u32 get = nvkm_rd32(device, 0x100e2c + foff);
-	u32 put = nvkm_rd32(device, 0x100e30 + foff);
+	u32 get = nvkm_rd32(device, buffer->get);
+	u32 put = nvkm_rd32(device, buffer->put);
 	if (put == get)
 		return;
 
@@ -51,7 +50,7 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 
 		if (++get == buffer->entries)
 			get = 0;
-		nvkm_wr32(device, 0x100e2c + foff, get);
+		nvkm_wr32(device, buffer->get, get);
 
 		info.addr   = ((u64)addrhi << 32) | addrlo;
 		info.inst   = ((u64)insthi << 32) | instlo;
@@ -70,13 +69,21 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 }
 
 static void
-gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
-	const u32 foff = buffer->id * 0x14;
+	if (enable)
+		nvkm_mask(device, 0x100a2c, intr, intr);
+	else
+		nvkm_mask(device, 0x100a34, intr, intr);
+}
 
-	nvkm_mask(device, 0x100a34, intr, intr);
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 foff = buffer->id * 0x14;
 	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
 }
 
@@ -84,23 +91,25 @@ static void
 gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
-	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
 	const u32 foff = buffer->id * 0x14;
 
 	nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
-	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
-	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
+	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
 	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
-	nvkm_mask(device, 0x100a2c, intr, intr);
 }
 
-static u32
-gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x14;
+
 	nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
-	return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+
+	buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+	buffer->get = 0x100e2c + foff;
+	buffer->put = 0x100e30 + foff;
 }
 
 static int
@@ -166,6 +175,8 @@ static void
 gv100_fault_fini(struct nvkm_fault *fault)
 {
 	nvkm_notify_put(&fault->nrpfb);
+	if (fault->buffer[0])
+		fault->func->buffer.fini(fault->buffer[0]);
 	nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
 }
 
@@ -173,14 +184,15 @@ static void
 gv100_fault_init(struct nvkm_fault *fault)
 {
 	nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+	fault->func->buffer.init(fault->buffer[0]);
 	nvkm_notify_get(&fault->nrpfb);
 }
 
-static int
+int
 gv100_fault_oneinit(struct nvkm_fault *fault)
 {
 	return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
-				gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+				gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
 				&fault->nrpfb);
 }
 
@@ -192,9 +204,10 @@ gv100_fault = {
 	.intr = gv100_fault_intr,
 	.buffer.nr = 2,
 	.buffer.entry_size = 32,
-	.buffer.entries = gv100_fault_buffer_entries,
+	.buffer.info = gv100_fault_buffer_info,
 	.buffer.init = gv100_fault_buffer_init,
 	.buffer.fini = gv100_fault_buffer_fini,
+	.buffer.intr = gv100_fault_buffer_intr,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index e4d2f52..8ca8b28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -12,8 +12,10 @@ struct nvkm_fault_buffer {
 	struct nvkm_fault *fault;
 	int id;
 	int entries;
+	u32 get;
+	u32 put;
 	struct nvkm_memory *mem;
-	struct nvkm_vma *vma;
+	u64 addr;
 };
 
 int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
@@ -27,9 +29,12 @@ struct nvkm_fault_func {
 	struct {
 		int nr;
 		u32 entry_size;
-		u32 (*entries)(struct nvkm_fault_buffer *);
+		void (*info)(struct nvkm_fault_buffer *);
 		void (*init)(struct nvkm_fault_buffer *);
 		void (*fini)(struct nvkm_fault_buffer *);
+		void (*intr)(struct nvkm_fault_buffer *, bool enable);
 	} buffer;
 };
+
+int gv100_fault_oneinit(struct nvkm_fault *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
new file mode 100644
index 0000000..9c8a3ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+	/*XXX: Earlier versions of RM touched the old regs on Turing,
+	 *     which don't appear to actually work anymore, but newer
+	 *     versions of RM don't appear to touch anything at all..
+	 */
+}
+
+static void
+tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 foff = buffer->id * 0x20;
+	nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 foff = buffer->id * 0x20;
+
+	nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
+	nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
+	nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
+	nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x80000000);
+}
+
+static void
+tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 foff = buffer->id * 0x20;
+
+	nvkm_mask(device, 0xb83010 + foff, 0x40000000, 0x40000000);
+
+	buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff;
+	buffer->get = 0xb83008 + foff;
+	buffer->put = 0xb8300c + foff;
+}
+
+static void
+tu104_fault_intr_fault(struct nvkm_fault *fault)
+{
+	struct nvkm_subdev *subdev = &fault->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fault_data info;
+	const u32 addrlo = nvkm_rd32(device, 0xb83080);
+	const u32 addrhi = nvkm_rd32(device, 0xb83084);
+	const u32  info0 = nvkm_rd32(device, 0xb83088);
+	const u32 insthi = nvkm_rd32(device, 0xb8308c);
+	const u32  info1 = nvkm_rd32(device, 0xb83090);
+
+	info.addr = ((u64)addrhi << 32) | addrlo;
+	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
+	info.time = 0;
+	info.engine = (info0 & 0x000000ff);
+	info.valid  = (info1 & 0x80000000) >> 31;
+	info.gpc    = (info1 & 0x1f000000) >> 24;
+	info.hub    = (info1 & 0x00100000) >> 20;
+	info.access = (info1 & 0x000f0000) >> 16;
+	info.client = (info1 & 0x00007f00) >> 8;
+	info.reason = (info1 & 0x0000001f);
+
+	nvkm_fifo_fault(device->fifo, &info);
+}
+
+static void
+tu104_fault_intr(struct nvkm_fault *fault)
+{
+	struct nvkm_subdev *subdev = &fault->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0xb83094);
+
+	if (stat & 0x80000000) {
+		tu104_fault_intr_fault(fault);
+		nvkm_wr32(device, 0xb83094, 0x80000000);
+		stat &= ~0x80000000;
+	}
+
+	if (stat & 0x00000200) {
+		if (fault->buffer[0]) {
+			nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+			stat &= ~0x00000200;
+		}
+	}
+
+	/*XXX: guess, can't confirm until we get fw... */
+	if (stat & 0x00000100) {
+		if (fault->buffer[1]) {
+			nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+			stat &= ~0x00000100;
+		}
+	}
+
+	if (stat) {
+		nvkm_debug(subdev, "intr %08x\n", stat);
+	}
+}
+
+static void
+tu104_fault_fini(struct nvkm_fault *fault)
+{
+	nvkm_notify_put(&fault->nrpfb);
+	if (fault->buffer[0])
+		fault->func->buffer.fini(fault->buffer[0]);
+	/*XXX: disable priv faults */
+}
+
+static void
+tu104_fault_init(struct nvkm_fault *fault)
+{
+	/*XXX: enable priv faults */
+	fault->func->buffer.init(fault->buffer[0]);
+	nvkm_notify_get(&fault->nrpfb);
+}
+
+static const struct nvkm_fault_func
+tu104_fault = {
+	.oneinit = gv100_fault_oneinit,
+	.init = tu104_fault_init,
+	.fini = tu104_fault_fini,
+	.intr = tu104_fault_intr,
+	.buffer.nr = 2,
+	.buffer.entry_size = 32,
+	.buffer.info = tu104_fault_buffer_info,
+	.buffer.init = tu104_fault_buffer_init,
+	.buffer.fini = tu104_fault_buffer_fini,
+	.buffer.intr = tu104_fault_buffer_intr,
+};
+
+int
+tu104_fault_new(struct nvkm_device *device, int index,
+		struct nvkm_fault **pfault)
+{
+	return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 434d2fc..b2bb5a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -68,10 +68,13 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 
 	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
 		switch (M0203E.type) {
-		case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
-		case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
-		case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
-		case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
+		case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
+		case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
+		case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
+		case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
+		case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
+		case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
+		case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
 		default:
 			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
 			return NVKM_RAM_TYPE_UNKNOWN;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index 24c7bd5..b11867f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -184,6 +184,9 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 		[NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
 		[NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
 		[NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
+		[NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
+		[NVKM_RAM_TYPE_GDDR6  ] = "GDDR6",
+		[NVKM_RAM_TYPE_HBM2   ] = "HBM2",
 	};
 	struct nvkm_subdev *subdev = &fb->subdev;
 	int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index db48a1d..02c4eb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -288,6 +288,19 @@ nv50_instobj_addr(struct nvkm_memory *memory)
 	return nvkm_memory_addr(nv50_instobj(memory)->ram);
 }
 
+static u64
+nv50_instobj_bar2(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	u64 addr = ~0ULL;
+	if (nv50_instobj_acquire(&iobj->base.memory)) {
+		iobj->lru.next = NULL; /* Exclude from eviction. */
+		addr = iobj->bar->addr;
+	}
+	nv50_instobj_release(&iobj->base.memory);
+	return addr;
+}
+
 static enum nvkm_memory_target
 nv50_instobj_target(struct nvkm_memory *memory)
 {
@@ -325,8 +338,9 @@ static const struct nvkm_memory_func
 nv50_instobj_func = {
 	.dtor = nv50_instobj_dtor,
 	.target = nv50_instobj_target,
-	.size = nv50_instobj_size,
+	.bar2 = nv50_instobj_bar2,
 	.addr = nv50_instobj_addr,
+	.size = nv50_instobj_size,
 	.boot = nv50_instobj_boot,
 	.acquire = nv50_instobj_acquire,
 	.release = nv50_instobj_release,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2befbe3..f3b06329c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -12,3 +12,4 @@
 nvkm-y += nvkm/subdev/mc/gk20a.o
 nvkm-y += nvkm/subdev/mc/gp100.o
 nvkm-y += nvkm/subdev/mc/gp10b.o
+nvkm-y += nvkm/subdev/mc/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 09f669a..0e57ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -108,6 +108,9 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 	if (stat)
 		nvkm_error(&mc->subdev, "intr %08x\n", stat);
 	*handled = intr != 0;
+
+	if (mc->func->intr_hack)
+		mc->func->intr_hack(mc, handled);
 }
 
 static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d9e3691..eb91a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -26,6 +26,7 @@ struct nvkm_mc_func {
 	void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
 	/* retrieve pending interrupt mask (NV_PMC_INTR) */
 	u32 (*intr_stat)(struct nvkm_mc *);
+	void (*intr_hack)(struct nvkm_mc *, bool *handled);
 	const struct nvkm_mc_map *reset;
 	void (*unk260)(struct nvkm_mc *, u32);
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
new file mode 100644
index 0000000..b7165bd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
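+/* Turing appears to report fault-buffer interrupts in 0xb81010 rather
+ * than through NV_PMC_INTR, so forward them to the FAULT subdev from
+ * this hook.  (Register/bit meanings are inferred from the handler
+ * below, not from documentation.)
+ */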
+static void
+tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	u32 stat = nvkm_rd32(device, 0xb81010);
+	if (stat & 0x00000050) {
+		struct nvkm_subdev *subdev =
+			nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
+		nvkm_wr32(device, 0xb81010, stat & 0x00000050);
+		if (subdev)
+			nvkm_subdev_intr(subdev);
+		*handled = true;
+	}
+}
+
+static const struct nvkm_mc_func
+tu104_mc = {
+	.init = nv50_mc_init,
+	.intr = gp100_mc_intr,
+	.intr_unarm = gp100_mc_intr_unarm,
+	.intr_rearm = gp100_mc_intr_rearm,
+	.intr_mask = gp100_mc_intr_mask,
+	.intr_stat = gf100_mc_intr_stat,
+	.intr_hack = tu104_mc_intr_hack,
+	.reset = gk104_mc_reset,
+};
+
+int
+tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return gp100_mc_new_(&tu104_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 58a24e3..8966180 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -13,6 +13,7 @@
 nvkm-y += nvkm/subdev/mmu/gp100.o
 nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
+nvkm-y += nvkm/subdev/mmu/tu104.o
 
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -33,6 +34,7 @@
 nvkm-y += nvkm/subdev/mmu/vmmgp100.o
 nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
 nvkm-y += nvkm/subdev/mmu/vmmgv100.o
+nvkm-y += nvkm/subdev/mmu/vmmtu104.o
 
 nvkm-y += nvkm/subdev/mmu/umem.o
 nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
new file mode 100644
index 0000000..8e6f409
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+tu104_mmu = {
+	.dma_bits = 47,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 37b201b..6889076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 			goto fail;
 		}
 
-		if (vma->addr != addr) {
-			const u64 tail = vma->size + vma->addr - addr;
-			if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
-				goto fail;
-			vma->part = true;
-			nvkm_vmm_node_insert(vmm, vma);
-		}
-
-		if (vma->size != size) {
-			const u64 tail = vma->size - size;
-			struct nvkm_vma *tmp;
-			if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
-				nvkm_vmm_unmap_region(vmm, vma);
-				goto fail;
-			}
-			tmp->part = true;
-			nvkm_vmm_node_insert(vmm, tmp);
+		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+		if (!vma) {
+			ret = -ENOMEM;
+			goto fail;
 		}
 	}
 	vma->busy = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 7459def..6b87fff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
 	return new;
 }
 
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	nvkm_vmm_free_remove(vmm, vma);
+	list_del(&vma->head);
+	kfree(vma);
+}
+
 static void
 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	rb_insert_color(&vma->tree, &vmm->free);
 }
 
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	nvkm_vmm_node_remove(vmm, vma);
+	list_del(&vma->head);
+	kfree(vma);
+}
+
+static void
 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
 	struct rb_node **ptr = &vmm->root.rb_node;
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
 	return NULL;
 }
 
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :             \
+	list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+	if (next) {
+		if (vma->size == size) {
+			vma->size += next->size;
+			nvkm_vmm_node_delete(vmm, next);
+			if (prev) {
+				prev->size += vma->size;
+				nvkm_vmm_node_delete(vmm, vma);
+				return prev;
+			}
+			return vma;
+		}
+		BUG_ON(prev);
+
+		nvkm_vmm_node_remove(vmm, next);
+		vma->size -= size;
+		next->addr -= size;
+		next->size += size;
+		nvkm_vmm_node_insert(vmm, next);
+		return next;
+	}
+
+	if (prev) {
+		if (vma->size != size) {
+			nvkm_vmm_node_remove(vmm, vma);
+			prev->size += size;
+			vma->addr += size;
+			vma->size -= size;
+			nvkm_vmm_node_insert(vmm, vma);
+		} else {
+			prev->size += vma->size;
+			nvkm_vmm_node_delete(vmm, vma);
+		}
+		return prev;
+	}
+
+	return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+		    struct nvkm_vma *vma, u64 addr, u64 size)
+{
+	struct nvkm_vma *prev = NULL;
+
+	if (vma->addr != addr) {
+		prev = vma;
+		if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+			return NULL;
+		vma->part = true;
+		nvkm_vmm_node_insert(vmm, vma);
+	}
+
+	if (vma->size != size) {
+		struct nvkm_vma *tmp;
+		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+			nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+			return NULL;
+		}
+		tmp->part = true;
+		nvkm_vmm_node_insert(vmm, tmp);
+	}
+
+	return vma;
+}
+
 static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
 }
 
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :              \
-	list_entry((root)->head.dir, struct nvkm_vma, head)
-
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-	struct nvkm_vma *next;
+	struct nvkm_vma *next = node(vma, next);
+	struct nvkm_vma *prev = NULL;
 
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
 
-	if (vma->part) {
-		struct nvkm_vma *prev = node(vma, prev);
-		if (!prev->memory) {
-			prev->size += vma->size;
-			rb_erase(&vma->tree, &vmm->root);
-			list_del(&vma->head);
-			kfree(vma);
-			vma = prev;
-		}
-	}
-
-	next = node(vma, next);
-	if (next && next->part) {
-		if (!next->memory) {
-			vma->size += next->size;
-			rb_erase(&next->tree, &vmm->root);
-			list_del(&next->head);
-			kfree(next);
-		}
-	}
+	if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+		prev = NULL;
+	if (!next->part || next->memory)
+		next = NULL;
+	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	struct nvkm_vma *prev, *next;
 
 	if ((prev = node(vma, prev)) && !prev->used) {
-		rb_erase(&prev->tree, &vmm->free);
-		list_del(&prev->head);
 		vma->addr  = prev->addr;
 		vma->size += prev->size;
-		kfree(prev);
+		nvkm_vmm_free_delete(vmm, prev);
 	}
 
 	if ((next = node(vma, next)) && !next->used) {
-		rb_erase(&next->tree, &vmm->free);
-		list_del(&next->head);
 		vma->size += next->size;
-		kfree(next);
+		nvkm_vmm_free_delete(vmm, next);
 	}
 
 	nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	}
 
 	/* Remove VMA from the list of allocated nodes. */
-	rb_erase(&vma->tree, &vmm->root);
+	nvkm_vmm_node_remove(vmm, vma);
 
 	/* Merge VMA back into the free list. */
 	vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 			tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 		if (addr <= tail && tail - addr >= size) {
-			rb_erase(&this->tree, &vmm->free);
+			nvkm_vmm_free_remove(vmm, this);
 			vma = this;
 			break;
 		}
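
The new nvkm_vmm_node_split()/nvkm_vmm_node_merge() pair centralises the
region arithmetic that nvkm_uvmm_mthd_map() used to open-code. As a
standalone sketch of the split arithmetic (plain C with made-up names,
not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    struct range { uint64_t addr, size; };

    int main(void)
    {
    	struct range vma = { 0x1000, 0x4000 };	/* existing node */
    	uint64_t addr = 0x2000, size = 0x1000;	/* sub-range to map */

    	/* Splitting at (addr, size) peels off up to two remainders,
    	 * much as nvkm_vmm_node_split() does with nvkm_vma_tail(). */
    	struct range prev = { vma.addr, addr - vma.addr };
    	struct range mid  = { addr, size };
    	struct range tail = { addr + size,
    			      vma.addr + vma.size - (addr + size) };

    	/* Prints: prev 0x1000+0x1000 mid 0x2000+0x1000 tail 0x3000+0x2000 */
    	printf("prev %#llx+%#llx mid %#llx+%#llx tail %#llx+%#llx\n",
    	       (unsigned long long)prev.addr, (unsigned long long)prev.size,
    	       (unsigned long long)mid.addr, (unsigned long long)mid.size,
    	       (unsigned long long)tail.addr, (unsigned long long)tail.size);
    	return 0;
    }

Unmapping the middle piece later lets nvkm_vmm_node_merge() glue the
regions back together, which is what the rewritten
nvkm_vmm_unmap_region() relies on.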
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 1a3b0a3..42ad326 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
 		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
 		  const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+				     u64 addr, u64 size);
 int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
 			bool sparse, u8 page, u8 align, u64 size,
 			struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
 
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
 		  u64, u64, void *, u32, struct lock_class_key *,
@@ -200,6 +201,8 @@ int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 void gp100_vmm_flush(struct nvkm_vmm *, int);
 
+int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
 int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 		 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
@@ -239,6 +242,9 @@ int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
+int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
 
 #define VMM_PRINT(l,v,p,f,a...) do {                                           \
 	struct nvkm_vmm *_vmm = (v);                                           \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
new file mode 100644
index 0000000..adaadd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 type = depth << 24; /*XXX: not confirmed */
+
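+	/* The depth-based encoding above is unconfirmed on Turing, so
+	 * unconditionally request a full flush for now.
+	 */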
+	type = 0x00000001; /* PAGE_ALL */
+	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+		type |= 0x00000004; /* HUB_ONLY */
+
+	mutex_lock(&subdev->mutex);
+
+	nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+	nvkm_wr32(device, 0xb830a4, 0x00000000);
+	nvkm_wr32(device, 0x100e68, 0x00000000);
+	nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
+
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
+			break;
+	);
+
+	mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+tu104_vmm = {
+	.join = gv100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gp100_vmm_valid,
+	.flush = tu104_vmm_flush,
+	.page = {
+		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+int
+tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 1f7a3c1..84a2f24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -59,10 +59,10 @@ gp102_run_secure_scrub(struct nvkm_secboot *sb)
 
 	nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
 
-	engine = nvkm_engine_ref(&device->nvdec->engine);
+	engine = nvkm_engine_ref(&device->nvdec[0]->engine);
 	if (IS_ERR(engine))
 		return PTR_ERR(engine);
-	falcon = device->nvdec->falcon;
+	falcon = device->nvdec[0]->falcon;
 
 	nvkm_falcon_get(falcon, &sb->subdev);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 36de23d..dd92203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -23,6 +23,42 @@
  */
 #include "priv.h"
 
+s64
+nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
+{
+	struct nvkm_subdev *subdev = &wait->tmr->subdev;
+	u64 time = nvkm_timer_read(wait->tmr);
+
+	if (wait->reads == 0) {
+		wait->time0 = time;
+		wait->time1 = time;
+	}
+
+	if (wait->time1 == time) {
+		if (wait->reads++ == 16) {
+			nvkm_fatal(subdev, "stalled at %016llx\n", time);
+			return -ETIMEDOUT;
+		}
+	} else {
+		wait->time1 = time;
+		wait->reads = 1;
+	}
+
+	if (wait->time1 - wait->time0 > wait->limit)
+		return -ETIMEDOUT;
+
+	return wait->time1 - wait->time0;
+}
+
+void
+nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
+		     struct nvkm_timer_wait *wait)
+{
+	wait->tmr = device->timer;
+	wait->limit = nsec;
+	wait->reads = 0;
+}
+
 u64
 nvkm_timer_read(struct nvkm_timer *tmr)
 {
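
nvkm_timer_wait_init()/nvkm_timer_wait_test() give pollers a wall-clock
budget plus a check that the timer itself is still advancing (too many
identical reads in a row trip the "stalled" fatal path). A hypothetical
caller, where done() stands in for the real completion test and device
is in scope:

    struct nvkm_timer_wait wait;
    s64 time;

    nvkm_timer_wait_init(device, 2000000000ULL, &wait); /* 2s budget, ns */
    do {
    	if (done())
    		break;				/* condition met in time */
    	time = nvkm_timer_wait_test(&wait);
    } while (time >= 0);				/* negative: -ETIMEDOUT */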
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 4f1f3e8..39081ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -86,7 +86,7 @@ gk104_top_oneinit(struct nvkm_top *top)
 		case 0x0000000d: A_(SEC2  ); break;
 		case 0x0000000e: B_(NVENC ); break;
 		case 0x0000000f: A_(NVENC1); break;
-		case 0x00000010: A_(NVDEC ); break;
+		case 0x00000010: B_(NVDEC ); break;
 		case 0x00000013: B_(CE    ); break;
 			break;
 		default:
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6020c30..3f35377 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -90,6 +90,18 @@
 	  Say Y here if you want to enable support for LG4573 RGB panel.
 	  To compile this driver as a module, choose M here.
 
+config DRM_PANEL_OLIMEX_LCD_OLINUXINO
+	tristate "Olimex LCD-OLinuXino panel"
+	depends on OF
+	depends on I2C
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  The panel is used with LCDs of different sizes, from 480x272 up
+	  to 1280x800, all with 24 bits per pixel.
+
+	  Say Y here if you want to enable support for Olimex Ltd.
+	  LCD-OLinuXino panel.
+
 config DRM_PANEL_ORISETECH_OTM8009A
 	tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
 	depends on OF
@@ -126,6 +138,12 @@
 	  Say Y here if you want to enable support for Raydium RM68200
 	  720x1280 DSI video mode panel.
 
+config DRM_PANEL_SAMSUNG_S6D16D0
+	tristate "Samsung S6D16D0 DSI video mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	select VIDEOMODE_HELPERS
+
 config DRM_PANEL_SAMSUNG_S6E3HA2
 	tristate "Samsung S6E3HA2 DSI video mode panel"
 	depends on OF
@@ -186,4 +204,11 @@
 	  Say Y here if you want to enable support for the Sitronix
 	  ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_TRULY_NT35597_WQXGA
+	tristate "Truly WQXGA"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	help
+	  Say Y here if you want to enable support for the Truly NT35597
+	  WQXGA dual-DSI video mode panel.
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5ccaaa9..4396658 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,11 +7,13 @@
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
 obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
@@ -19,3 +21,4 @@
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 72edb33..ca4ae45 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -506,8 +506,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
 
 static void innolux_panel_del(struct innolux_panel *innolux)
 {
-	if (innolux->base.dev)
-		drm_panel_remove(&innolux->base);
+	drm_panel_remove(&innolux->base);
 }
 
 static int innolux_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
new file mode 100644
index 0000000..5e8d452
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Panel driver for Olimex LCD-OLinuXino displays
+ *
+ * Copyright (C) 2018 Olimex Ltd.
+ *   Author: Stefan Mavrodiev <stefan@olimex.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/crc32.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#define LCD_OLINUXINO_HEADER_MAGIC	0x4F4CB727
+#define LCD_OLINUXINO_DATA_LEN		256
+
+struct lcd_olinuxino_mode {
+	u32 pixelclock;
+	u32 hactive;
+	u32 hfp;
+	u32 hbp;
+	u32 hpw;
+	u32 vactive;
+	u32 vfp;
+	u32 vbp;
+	u32 vpw;
+	u32 refresh;
+	u32 flags;
+};
+
+struct lcd_olinuxino_info {
+	char name[32];
+	u32 width_mm;
+	u32 height_mm;
+	u32 bpc;
+	u32 bus_format;
+	u32 bus_flag;
+} __attribute__((__packed__));
+
+struct lcd_olinuxino_eeprom {
+	u32 header;
+	u32 id;
+	char revision[4];
+	u32 serial;
+	struct lcd_olinuxino_info info;
+	u32 num_modes;
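+	/* Up to four struct lcd_olinuxino_mode entries (44 bytes each)
+	 * are stored at the start of this area; lcd_olinuxino_probe()
+	 * clamps num_modes accordingly.
+	 */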
+	u8 reserved[180];
+	u32 checksum;
+} __attribute__((__packed__));
+
+struct lcd_olinuxino {
+	struct drm_panel panel;
+	struct device *dev;
+	struct i2c_client *client;
+	struct mutex mutex;
+
+	bool prepared;
+	bool enabled;
+
+	struct backlight_device *backlight;
+	struct regulator *supply;
+	struct gpio_desc *enable_gpio;
+
+	struct lcd_olinuxino_eeprom eeprom;
+};
+
+static inline struct lcd_olinuxino *to_lcd_olinuxino(struct drm_panel *panel)
+{
+	return container_of(panel, struct lcd_olinuxino, panel);
+}
+
+static int lcd_olinuxino_disable(struct drm_panel *panel)
+{
+	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+	if (!lcd->enabled)
+		return 0;
+
+	backlight_disable(lcd->backlight);
+
+	lcd->enabled = false;
+
+	return 0;
+}
+
+static int lcd_olinuxino_unprepare(struct drm_panel *panel)
+{
+	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+	if (!lcd->prepared)
+		return 0;
+
+	gpiod_set_value_cansleep(lcd->enable_gpio, 0);
+	regulator_disable(lcd->supply);
+
+	lcd->prepared = false;
+
+	return 0;
+}
+
+static int lcd_olinuxino_prepare(struct drm_panel *panel)
+{
+	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+	int ret;
+
+	if (lcd->prepared)
+		return 0;
+
+	ret = regulator_enable(lcd->supply);
+	if (ret < 0)
+		return ret;
+
+	gpiod_set_value_cansleep(lcd->enable_gpio, 1);
+	lcd->prepared = true;
+
+	return 0;
+}
+
+static int lcd_olinuxino_enable(struct drm_panel *panel)
+{
+	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+	if (lcd->enabled)
+		return 0;
+
+	backlight_enable(lcd->backlight);
+
+	lcd->enabled = true;
+
+	return 0;
+}
+
+static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+{
+	struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+	struct drm_connector *connector = lcd->panel.connector;
+	struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
+	struct drm_device *drm = lcd->panel.drm;
+	struct lcd_olinuxino_mode *lcd_mode;
+	struct drm_display_mode *mode;
+	u32 i, num = 0;
+
+	for (i = 0; i < lcd->eeprom.num_modes; i++) {
+		lcd_mode = (struct lcd_olinuxino_mode *)
+			   &lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
+
+		mode = drm_mode_create(drm);
+		if (!mode) {
+			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+				lcd_mode->hactive,
+				lcd_mode->vactive,
+				lcd_mode->refresh);
+			continue;
+		}
+
+		mode->clock = lcd_mode->pixelclock;
+		mode->hdisplay = lcd_mode->hactive;
+		mode->hsync_start = lcd_mode->hactive + lcd_mode->hfp;
+		mode->hsync_end = lcd_mode->hactive + lcd_mode->hfp +
+				  lcd_mode->hpw;
+		mode->htotal = lcd_mode->hactive + lcd_mode->hfp +
+			       lcd_mode->hpw + lcd_mode->hbp;
+		mode->vdisplay = lcd_mode->vactive;
+		mode->vsync_start = lcd_mode->vactive + lcd_mode->vfp;
+		mode->vsync_end = lcd_mode->vactive + lcd_mode->vfp +
+				  lcd_mode->vpw;
+		mode->vtotal = lcd_mode->vactive + lcd_mode->vfp +
+			       lcd_mode->vpw + lcd_mode->vbp;
+		mode->vrefresh = lcd_mode->refresh;
+
+		/* Always make the first mode preferred */
+		if (i == 0)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		num++;
+	}
+
+	memcpy(connector->display_info.name, lcd_info->name, 32);
+	connector->display_info.width_mm = lcd_info->width_mm;
+	connector->display_info.height_mm = lcd_info->height_mm;
+	connector->display_info.bpc = lcd_info->bpc;
+
+	if (lcd_info->bus_format)
+		drm_display_info_set_bus_formats(&connector->display_info,
+						 &lcd_info->bus_format, 1);
+	connector->display_info.bus_flags = lcd_info->bus_flag;
+
+	return num;
+}
+
+static const struct drm_panel_funcs lcd_olinuxino_funcs = {
+	.disable = lcd_olinuxino_disable,
+	.unprepare = lcd_olinuxino_unprepare,
+	.prepare = lcd_olinuxino_prepare,
+	.enable = lcd_olinuxino_enable,
+	.get_modes = lcd_olinuxino_get_modes,
+};
+
+static int lcd_olinuxino_probe(struct i2c_client *client,
+			       const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct lcd_olinuxino *lcd;
+	u32 checksum, i;
+	int ret = 0;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+				     I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+		return -ENODEV;
+
+	lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
+	if (!lcd)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, lcd);
+	lcd->dev = dev;
+	lcd->client = client;
+
+	mutex_init(&lcd->mutex);
+
+	/* Copy data into buffer */
+	for (i = 0; i < LCD_OLINUXINO_DATA_LEN; i += I2C_SMBUS_BLOCK_MAX) {
+		mutex_lock(&lcd->mutex);
+		ret = i2c_smbus_read_i2c_block_data(client,
+						    i,
+						    I2C_SMBUS_BLOCK_MAX,
+						    (u8 *)&lcd->eeprom + i);
+		mutex_unlock(&lcd->mutex);
+		if (ret < 0) {
+			dev_err(dev, "error reading from device at %02x\n", i);
+			return ret;
+		}
+	}
+
+	/* Check configuration checksum */
+	checksum = ~crc32(~0, (u8 *)&lcd->eeprom, 252);
+	if (checksum != lcd->eeprom.checksum) {
+		dev_err(dev, "configuration checksum does not match!\n");
+		return -EINVAL;
+	}
+
+	/* Check magic header */
+	if (lcd->eeprom.header != LCD_OLINUXINO_HEADER_MAGIC) {
+		dev_err(dev, "magic header does not match\n");
+		return -EINVAL;
+	}
+
+	dev_info(dev, "Detected %s, Rev. %s, Serial: %08x\n",
+		 lcd->eeprom.info.name,
+		 lcd->eeprom.revision,
+		 lcd->eeprom.serial);
+
+	/*
+	 * The EEPROM can hold up to 4 modes.
+	 * If the stored count is larger, clamp it.
+	 */
+	if (lcd->eeprom.num_modes > 4) {
+		dev_warn(dev, "invalid number of modes, falling back to 4\n");
+		lcd->eeprom.num_modes = 4;
+	}
+
+	lcd->enabled = false;
+	lcd->prepared = false;
+
+	lcd->supply = devm_regulator_get(dev, "power");
+	if (IS_ERR(lcd->supply))
+		return PTR_ERR(lcd->supply);
+
+	lcd->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+	if (IS_ERR(lcd->enable_gpio))
+		return PTR_ERR(lcd->enable_gpio);
+
+	lcd->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(lcd->backlight))
+		return PTR_ERR(lcd->backlight);
+
+	drm_panel_init(&lcd->panel);
+	lcd->panel.dev = dev;
+	lcd->panel.funcs = &lcd_olinuxino_funcs;
+
+	return drm_panel_add(&lcd->panel);
+}
+
+static int lcd_olinuxino_remove(struct i2c_client *client)
+{
+	struct lcd_olinuxino *panel = i2c_get_clientdata(client);
+
+	drm_panel_remove(&panel->panel);
+
+	lcd_olinuxino_disable(&panel->panel);
+	lcd_olinuxino_unprepare(&panel->panel);
+
+	return 0;
+}
+
+static const struct of_device_id lcd_olinuxino_of_ids[] = {
+	{ .compatible = "olimex,lcd-olinuxino" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lcd_olinuxino_of_ids);
+
+static struct i2c_driver lcd_olinuxino_driver = {
+	.driver = {
+		.name = "lcd_olinuxino",
+		.of_match_table = lcd_olinuxino_of_ids,
+	},
+	.probe = lcd_olinuxino_probe,
+	.remove = lcd_olinuxino_remove,
+};
+
+module_i2c_driver(lcd_olinuxino_driver);
+
+MODULE_AUTHOR("Stefan Mavrodiev <stefan@olimex.com>");
+MODULE_DESCRIPTION("LCD-OLinuXino driver");
+MODULE_LICENSE("GPL");
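
For reference, the checksum covers the first 252 bytes of the EEPROM,
i.e. everything except the trailing u32 itself, and the kernel's
~crc32(~0, ...) should match the standard (zlib) CRC-32. Assuming a
little-endian image dumped from the device, a userspace check could
look like this (a sketch; lcd_olinuxino_verify is a made-up helper):

    #include <stdint.h>
    #include <string.h>
    #include <zlib.h>

    /* buf: 256-byte dump of the panel EEPROM */
    static int lcd_olinuxino_verify(const uint8_t buf[256])
    {
    	uint32_t stored, computed;

    	memcpy(&stored, buf + 252, sizeof(stored)); /* LE u32 assumed */
    	computed = crc32(0L, buf, 252);	/* == ~crc32(~0, buf, 252) */
    	return stored == computed ? 0 : -1;
    }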
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
new file mode 100644
index 0000000..33c22ee
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Samsung s6d16d0 panel driver. This is an 864x480
+ * AMOLED panel with a command-only DSI interface.
+ */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+struct s6d16d0 {
+	struct device *dev;
+	struct drm_panel panel;
+	struct regulator *supply;
+	struct gpio_desc *reset_gpio;
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode.
+ */
+static const struct drm_display_mode samsung_s6d16d0_mode = {
+	/* HS clock, (htotal*vtotal*vrefresh)/1000 */
+	.clock = 420160,
+	.hdisplay = 864,
+	.hsync_start = 864 + 154,
+	.hsync_end = 864 + 154 + 16,
+	.htotal = 864 + 154 + 16 + 32,
+	.vdisplay = 480,
+	.vsync_start = 480 + 1,
+	.vsync_end = 480 + 1 + 1,
+	.vtotal = 480 + 1 + 1 + 1,
+	/*
+	 * This depends on the clocking: HS vs LP rate. The value is
+	 * calculated as:
+	 * vrefresh = (clock * 1000) / (htotal * vtotal)
+	 *          = 420160000 / (1066 * 483) ~= 816
+	 */
+	.vrefresh = 816,
+	.width_mm = 84,
+	.height_mm = 48,
+};
+
+static inline struct s6d16d0 *panel_to_s6d16d0(struct drm_panel *panel)
+{
+	return container_of(panel, struct s6d16d0, panel);
+}
+
+static int s6d16d0_unprepare(struct drm_panel *panel)
+{
+	struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+	int ret;
+
+	/* Enter sleep mode */
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to enter sleep mode (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	/* Assert RESET */
+	gpiod_set_value_cansleep(s6->reset_gpio, 1);
+	regulator_disable(s6->supply);
+
+	return 0;
+}
+
+static int s6d16d0_prepare(struct drm_panel *panel)
+{
+	struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+	int ret;
+
+	ret = regulator_enable(s6->supply);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to enable supply (%d)\n", ret);
+		return ret;
+	}
+
+	/* Assert RESET */
+	gpiod_set_value_cansleep(s6->reset_gpio, 1);
+	udelay(10);
+	/* De-assert RESET */
+	gpiod_set_value_cansleep(s6->reset_gpio, 0);
+	msleep(120);
+
+	/* Enable tearing mode: send TE (tearing effect) at VBLANK */
+	ret = mipi_dsi_dcs_set_tear_on(dsi,
+				       MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
+			      ret);
+		return ret;
+	}
+	/* Exit sleep mode and power on */
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to exit sleep mode (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int s6d16d0_enable(struct drm_panel *panel)
+{
+	struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+	int ret;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to turn display on (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int s6d16d0_disable(struct drm_panel *panel)
+{
+	struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+	int ret;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(s6->dev, "failed to turn display off (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int s6d16d0_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct drm_display_mode *mode;
+
+	strncpy(connector->display_info.name, "Samsung S6D16D0\0",
+		DRM_DISPLAY_INFO_LEN);
+
+	mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+	if (!mode) {
+		DRM_ERROR("bad mode or failed to add mode\n");
+		return -EINVAL;
+	}
+	drm_mode_set_name(mode);
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	drm_mode_probed_add(connector, mode);
+
+	return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs s6d16d0_drm_funcs = {
+	.disable = s6d16d0_disable,
+	.unprepare = s6d16d0_unprepare,
+	.prepare = s6d16d0_prepare,
+	.enable = s6d16d0_enable,
+	.get_modes = s6d16d0_get_modes,
+};
+
+static int s6d16d0_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct s6d16d0 *s6;
+	int ret;
+
+	s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
+	if (!s6)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, s6);
+	s6->dev = dev;
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->hs_rate = 420160000;
+	dsi->lp_rate = 19200000;
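+	/* HS rate mirrors the mode clock (420160 kHz); LP rate is 19.2 MHz. */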
+	/*
+	 * This display uses command mode so no MIPI_DSI_MODE_VIDEO
+	 * or MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+	 *
+	 * As we only send commands we do not need to be continuously
+	 * clocked.
+	 */
+	dsi->mode_flags =
+		MIPI_DSI_CLOCK_NON_CONTINUOUS |
+		MIPI_DSI_MODE_EOT_PACKET;
+
+	s6->supply = devm_regulator_get(dev, "vdd1");
+	if (IS_ERR(s6->supply))
+		return PTR_ERR(s6->supply);
+
+	/* This asserts RESET by default */
+	s6->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+						 GPIOD_OUT_HIGH);
+	if (IS_ERR(s6->reset_gpio)) {
+		ret = PTR_ERR(s6->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+				      ret);
+		return ret;
+	}
+
+	drm_panel_init(&s6->panel);
+	s6->panel.dev = dev;
+	s6->panel.funcs = &s6d16d0_drm_funcs;
+
+	ret = drm_panel_add(&s6->panel);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0)
+		drm_panel_remove(&s6->panel);
+
+	return ret;
+}
+
+static int s6d16d0_remove(struct mipi_dsi_device *dsi)
+{
+	struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&s6->panel);
+
+	return 0;
+}
+
+static const struct of_device_id s6d16d0_of_match[] = {
+	{ .compatible = "samsung,s6d16d0" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, s6d16d0_of_match);
+
+static struct mipi_dsi_driver s6d16d0_driver = {
+	.probe = s6d16d0_probe,
+	.remove = s6d16d0_remove,
+	.driver = {
+		.name = "panel-samsung-s6d16d0",
+		.of_match_table = s6d16d0_of_match,
+	},
+};
+module_mipi_dsi_driver(s6d16d0_driver);
+
+MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI s6d16d0 Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 75f9253..2d99e28 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 NXP Semiconductors.
  * Author: Marco Franchi <marco.franchi@nxp.com>
  *
  * Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
  */
 
 #include <linux/backlight.h>
@@ -366,6 +363,6 @@ static struct platform_driver seiko_panel_platform_driver = {
 };
 module_platform_driver(seiko_panel_platform_driver);
 
-MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com");
+MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com>");
 MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a04ffb3..9c69e73 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = {
 	},
 };
 
+static const struct drm_display_mode auo_g101evn010_mode = {
+	.clock = 68930,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 82,
+	.hsync_end = 1280 + 82 + 2,
+	.htotal = 1280 + 82 + 2 + 84,
+	.vdisplay = 800,
+	.vsync_start = 800 + 8,
+	.vsync_end = 800 + 8 + 2,
+	.vtotal = 800 + 8 + 2 + 6,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_g101evn010 = {
+	.modes = &auo_g101evn010_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 216,
+		.height = 135,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode auo_g104sn02_mode = {
 	.clock = 40000,
 	.hdisplay = 800,
@@ -782,16 +806,38 @@ static const struct panel_desc avic_tm070ddh03 = {
 	},
 };
 
+static const struct drm_display_mode bananapi_s070wv20_ct16_mode = {
+	.clock = 30000,
+	.hdisplay = 800,
+	.hsync_start = 800 + 40,
+	.hsync_end = 800 + 40 + 48,
+	.htotal = 800 + 40 + 48 + 40,
+	.vdisplay = 480,
+	.vsync_start = 480 + 13,
+	.vsync_end = 480 + 13 + 3,
+	.vtotal = 480 + 13 + 3 + 29,
+};
+
+static const struct panel_desc bananapi_s070wv20_ct16 = {
+	.modes = &bananapi_s070wv20_ct16_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 154,
+		.height = 86,
+	},
+};
+
 static const struct drm_display_mode boe_hv070wsa_mode = {
-	.clock = 40800,
+	.clock = 42105,
 	.hdisplay = 1024,
-	.hsync_start = 1024 + 90,
-	.hsync_end = 1024 + 90 + 90,
-	.htotal = 1024 + 90 + 90 + 90,
+	.hsync_start = 1024 + 30,
+	.hsync_end = 1024 + 30 + 30,
+	.htotal = 1024 + 30 + 30 + 30,
 	.vdisplay = 600,
-	.vsync_start = 600 + 3,
-	.vsync_end = 600 + 3 + 4,
-	.vtotal = 600 + 3 + 4 + 3,
+	.vsync_start = 600 + 10,
+	.vsync_end = 600 + 10 + 10,
+	.vtotal = 600 + 10 + 10 + 10,
 	.vrefresh = 60,
 };
 
@@ -846,6 +892,55 @@ static const struct panel_desc boe_nv101wxmn51 = {
 	},
 };
 
+static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
+	.clock = 9000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 5,
+	.hsync_end = 480 + 5 + 5,
+	.htotal = 480 + 5 + 5 + 40,
+	.vdisplay = 272,
+	.vsync_start = 272 + 8,
+	.vsync_end = 272 + 8 + 8,
+	.vtotal = 272 + 8 + 8 + 8,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s043wq26h_ct7 = {
+	.modes = &cdtech_s043wq26h_ct7_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 95,
+		.height = 54,
+	},
+	.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
+	.clock = 35000,
+	.hdisplay = 800,
+	.hsync_start = 800 + 40,
+	.hsync_end = 800 + 40 + 40,
+	.htotal = 800 + 40 + 40 + 48,
+	.vdisplay = 480,
+	.vsync_start = 480 + 29,
+	.vsync_end = 480 + 29 + 13,
+	.vtotal = 480 + 29 + 13 + 3,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s070wv95_ct16 = {
+	.modes = &cdtech_s070wv95_ct16_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 154,
+		.height = 85,
+	},
+};
+
 static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
 	.clock = 66770,
 	.hdisplay = 800,
@@ -971,6 +1066,36 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
 };
 
+static const struct display_timing dlc_dlc1010gig_timing = {
+	.pixelclock = { 68900000, 71100000, 73400000 },
+	.hactive = { 1280, 1280, 1280 },
+	.hfront_porch = { 43, 53, 63 },
+	.hback_porch = { 43, 53, 63 },
+	.hsync_len = { 44, 54, 64 },
+	.vactive = { 800, 800, 800 },
+	.vfront_porch = { 5, 8, 11 },
+	.vback_porch = { 5, 8, 11 },
+	.vsync_len = { 5, 7, 11 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
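+/* The dlc_dlc1010gig_timing fields above are { min, typical, max } triplets. */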
+
+static const struct panel_desc dlc_dlc1010gig = {
+	.timings = &dlc_dlc1010gig_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 216,
+		.height = 135,
+	},
+	.delay = {
+		.prepare = 60,
+		.enable = 150,
+		.disable = 100,
+		.unprepare = 60,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
 static const struct drm_display_mode edt_et057090dhu_mode = {
 	.clock = 25175,
 	.hdisplay = 640,
@@ -2334,6 +2459,33 @@ static const struct panel_desc winstar_wf35ltiacd = {
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode arm_rtsm_mode[] = {
+	{
+		.clock = 65000,
+		.hdisplay = 1024,
+		.hsync_start = 1024 + 24,
+		.hsync_end = 1024 + 24 + 136,
+		.htotal = 1024 + 24 + 136 + 160,
+		.vdisplay = 768,
+		.vsync_start = 768 + 3,
+		.vsync_end = 768 + 3 + 6,
+		.vtotal = 768 + 3 + 6 + 29,
+		.vrefresh = 60,
+		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+	},
+};
+
+static const struct panel_desc arm_rtsm = {
+	.modes = arm_rtsm_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 400,
+		.height = 300,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct of_device_id platform_of_match[] = {
 	{
 		.compatible = "ampire,am-480272h3tmqw-t01h",
@@ -2342,6 +2494,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "ampire,am800480r3tmqwa1h",
 		.data = &ampire_am800480r3tmqwa1h,
 	}, {
+		.compatible = "arm,rtsm-display",
+		.data = &arm_rtsm,
+	}, {
 		.compatible = "auo,b101aw03",
 		.data = &auo_b101aw03,
 	}, {
@@ -2363,6 +2518,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "auo,g070vvn01",
 		.data = &auo_g070vvn01,
 	}, {
+		.compatible = "auo,g101evn010",
+		.data = &auo_g101evn010,
+	}, {
 		.compatible = "auo,g104sn02",
 		.data = &auo_g104sn02,
 	}, {
@@ -2381,12 +2539,21 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "avic,tm070ddh03",
 		.data = &avic_tm070ddh03,
 	}, {
+		.compatible = "bananapi,s070wv20-ct16",
+		.data = &bananapi_s070wv20_ct16,
+	}, {
 		.compatible = "boe,hv070wsa-100",
 		.data = &boe_hv070wsa
 	}, {
 		.compatible = "boe,nv101wxmn51",
 		.data = &boe_nv101wxmn51,
 	}, {
+		.compatible = "cdtech,s043wq26h-ct7",
+		.data = &cdtech_s043wq26h_ct7,
+	}, {
+		.compatible = "cdtech,s070wv95-ct16",
+		.data = &cdtech_s070wv95_ct16,
+	}, {
 		.compatible = "chunghwa,claa070wp03xg",
 		.data = &chunghwa_claa070wp03xg,
 	}, {
@@ -2402,6 +2569,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "dlc,dlc0700yzg-1",
 		.data = &dlc_dlc0700yzg_1,
 	}, {
+		.compatible = "dlc,dlc1010gig",
+		.data = &dlc_dlc1010gig,
+	}, {
 		.compatible = "edt,et057090dhu",
 		.data = &edt_et057090dhu,
 	}, {
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
new file mode 100644
index 0000000..fc2a66c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+	"vdda",
+	"vdispp",
+	"vdispn",
+};
+
+static unsigned long const regulator_enable_loads[] = {
+	62000,
+	100000,
+	100000,
+};
+
+static unsigned long const regulator_disable_loads[] = {
+	80,
+	100,
+	100,
+};
+
+struct cmd_set {
+	u8 commands[4];
+	u8 size;
+};
+
+struct nt35597_config {
+	u32 width_mm;
+	u32 height_mm;
+	const char *panel_name;
+	const struct cmd_set *panel_on_cmds;
+	u32 num_on_cmds;
+	const struct drm_display_mode *dm;
+};
+
+struct truly_nt35597 {
+	struct device *dev;
+	struct drm_panel panel;
+
+	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *mode_gpio;
+
+	struct backlight_device *backlight;
+
+	struct mipi_dsi_device *dsi[2];
+
+	const struct nt35597_config *config;
+	bool prepared;
+	bool enabled;
+};
+
+static inline struct truly_nt35597 *panel_to_ctx(struct drm_panel *panel)
+{
+	return container_of(panel, struct truly_nt35597, panel);
+}
+
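+/*
+ * Vendor-supplied init sequence: each entry is a raw DCS buffer (an
+ * address byte followed by its parameters) plus the buffer length.
+ */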
+static const struct cmd_set qcom_2k_panel_magic_cmds[] = {
+	/* CMD2_P0 */
+	{ { 0xff, 0x20 }, 2 },
+	{ { 0xfb, 0x01 }, 2 },
+	{ { 0x00, 0x01 }, 2 },
+	{ { 0x01, 0x55 }, 2 },
+	{ { 0x02, 0x45 }, 2 },
+	{ { 0x05, 0x40 }, 2 },
+	{ { 0x06, 0x19 }, 2 },
+	{ { 0x07, 0x1e }, 2 },
+	{ { 0x0b, 0x73 }, 2 },
+	{ { 0x0c, 0x73 }, 2 },
+	{ { 0x0e, 0xb0 }, 2 },
+	{ { 0x0f, 0xae }, 2 },
+	{ { 0x11, 0xb8 }, 2 },
+	{ { 0x13, 0x00 }, 2 },
+	{ { 0x58, 0x80 }, 2 },
+	{ { 0x59, 0x01 }, 2 },
+	{ { 0x5a, 0x00 }, 2 },
+	{ { 0x5b, 0x01 }, 2 },
+	{ { 0x5c, 0x80 }, 2 },
+	{ { 0x5d, 0x81 }, 2 },
+	{ { 0x5e, 0x00 }, 2 },
+	{ { 0x5f, 0x01 }, 2 },
+	{ { 0x72, 0x11 }, 2 },
+	{ { 0x68, 0x03 }, 2 },
+	/* CMD2_P4 */
+	{ { 0xFF, 0x24 }, 2 },
+	{ { 0xFB, 0x01 }, 2 },
+	{ { 0x00, 0x1C }, 2 },
+	{ { 0x01, 0x0B }, 2 },
+	{ { 0x02, 0x0C }, 2 },
+	{ { 0x03, 0x01 }, 2 },
+	{ { 0x04, 0x0F }, 2 },
+	{ { 0x05, 0x10 }, 2 },
+	{ { 0x06, 0x10 }, 2 },
+	{ { 0x07, 0x10 }, 2 },
+	{ { 0x08, 0x89 }, 2 },
+	{ { 0x09, 0x8A }, 2 },
+	{ { 0x0A, 0x13 }, 2 },
+	{ { 0x0B, 0x13 }, 2 },
+	{ { 0x0C, 0x15 }, 2 },
+	{ { 0x0D, 0x15 }, 2 },
+	{ { 0x0E, 0x17 }, 2 },
+	{ { 0x0F, 0x17 }, 2 },
+	{ { 0x10, 0x1C }, 2 },
+	{ { 0x11, 0x0B }, 2 },
+	{ { 0x12, 0x0C }, 2 },
+	{ { 0x13, 0x01 }, 2 },
+	{ { 0x14, 0x0F }, 2 },
+	{ { 0x15, 0x10 }, 2 },
+	{ { 0x16, 0x10 }, 2 },
+	{ { 0x17, 0x10 }, 2 },
+	{ { 0x18, 0x89 }, 2 },
+	{ { 0x19, 0x8A }, 2 },
+	{ { 0x1A, 0x13 }, 2 },
+	{ { 0x1B, 0x13 }, 2 },
+	{ { 0x1C, 0x15 }, 2 },
+	{ { 0x1D, 0x15 }, 2 },
+	{ { 0x1E, 0x17 }, 2 },
+	{ { 0x1F, 0x17 }, 2 },
+	/* STV */
+	{ { 0x20, 0x40 }, 2 },
+	{ { 0x21, 0x01 }, 2 },
+	{ { 0x22, 0x00 }, 2 },
+	{ { 0x23, 0x40 }, 2 },
+	{ { 0x24, 0x40 }, 2 },
+	{ { 0x25, 0x6D }, 2 },
+	{ { 0x26, 0x40 }, 2 },
+	{ { 0x27, 0x40 }, 2 },
+	/* Vend */
+	{ { 0xE0, 0x00 }, 2 },
+	{ { 0xDC, 0x21 }, 2 },
+	{ { 0xDD, 0x22 }, 2 },
+	{ { 0xDE, 0x07 }, 2 },
+	{ { 0xDF, 0x07 }, 2 },
+	{ { 0xE3, 0x6D }, 2 },
+	{ { 0xE1, 0x07 }, 2 },
+	{ { 0xE2, 0x07 }, 2 },
+	/* UD */
+	{ { 0x29, 0xD8 }, 2 },
+	{ { 0x2A, 0x2A }, 2 },
+	/* CLK */
+	{ { 0x4B, 0x03 }, 2 },
+	{ { 0x4C, 0x11 }, 2 },
+	{ { 0x4D, 0x10 }, 2 },
+	{ { 0x4E, 0x01 }, 2 },
+	{ { 0x4F, 0x01 }, 2 },
+	{ { 0x50, 0x10 }, 2 },
+	{ { 0x51, 0x00 }, 2 },
+	{ { 0x52, 0x80 }, 2 },
+	{ { 0x53, 0x00 }, 2 },
+	{ { 0x56, 0x00 }, 2 },
+	{ { 0x54, 0x07 }, 2 },
+	{ { 0x58, 0x07 }, 2 },
+	{ { 0x55, 0x25 }, 2 },
+	/* Reset XDONB */
+	{ { 0x5B, 0x43 }, 2 },
+	{ { 0x5C, 0x00 }, 2 },
+	{ { 0x5F, 0x73 }, 2 },
+	{ { 0x60, 0x73 }, 2 },
+	{ { 0x63, 0x22 }, 2 },
+	{ { 0x64, 0x00 }, 2 },
+	{ { 0x67, 0x08 }, 2 },
+	{ { 0x68, 0x04 }, 2 },
+	/* Resolution:1440x2560 */
+	{ { 0x72, 0x02 }, 2 },
+	/* mux */
+	{ { 0x7A, 0x80 }, 2 },
+	{ { 0x7B, 0x91 }, 2 },
+	{ { 0x7C, 0xD8 }, 2 },
+	{ { 0x7D, 0x60 }, 2 },
+	{ { 0x7F, 0x15 }, 2 },
+	{ { 0x75, 0x15 }, 2 },
+	/* ABOFF */
+	{ { 0xB3, 0xC0 }, 2 },
+	{ { 0xB4, 0x00 }, 2 },
+	{ { 0xB5, 0x00 }, 2 },
+	/* Source EQ */
+	{ { 0x78, 0x00 }, 2 },
+	{ { 0x79, 0x00 }, 2 },
+	{ { 0x80, 0x00 }, 2 },
+	{ { 0x83, 0x00 }, 2 },
+	/* FP BP */
+	{ { 0x93, 0x0A }, 2 },
+	{ { 0x94, 0x0A }, 2 },
+	/* Inversion Type */
+	{ { 0x8A, 0x00 }, 2 },
+	{ { 0x9B, 0xFF }, 2 },
+	/* IMGSWAP =1 @PortSwap=1 */
+	{ { 0x9D, 0xB0 }, 2 },
+	{ { 0x9F, 0x63 }, 2 },
+	{ { 0x98, 0x10 }, 2 },
+	/* FRM */
+	{ { 0xEC, 0x00 }, 2 },
+	/* CMD1 */
+	{ { 0xFF, 0x10 }, 2 },
+	/* VBP+VSA=,VFP = 10H */
+	{ { 0x3B, 0x03, 0x0A, 0x0A }, 4 },
+	/* FTE on */
+	{ { 0x35, 0x00 }, 2 },
+	/* EN_BK =1(auto black) */
+	{ { 0xE5, 0x01 }, 2 },
+	/* CMD mode(10) VDO mode(03) */
+	{ { 0xBB, 0x03 }, 2 },
+	/* Non Reload MTP */
+	{ { 0xFB, 0x01 }, 2 },
+};
+
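+/*
+ * The panel is driven over two DSI links, so every command has to be
+ * mirrored to both hosts.
+ */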
+static int truly_dcs_write(struct drm_panel *panel, u32 command)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+		ret = mipi_dsi_dcs_write(ctx->dsi[i], command, NULL, 0);
+		if (ret < 0) {
+			DRM_DEV_ERROR(ctx->dev,
+				"cmd 0x%x failed for dsi = %d\n",
+				command, i);
+		}
+	}
+
+	return ret;
+}
+
+static int truly_dcs_write_buf(struct drm_panel *panel,
+	u32 size, const u8 *buf)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+		ret = mipi_dsi_dcs_write_buffer(ctx->dsi[i], buf, size);
+		if (ret < 0) {
+			DRM_DEV_ERROR(ctx->dev,
+				"failed to tx cmd [%d], err: %d\n", i, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int truly_35597_power_on(struct truly_nt35597 *ctx)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+		ret = regulator_set_load(ctx->supplies[i].consumer,
+					regulator_enable_loads[i]);
+		if (ret)
+			return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The reset sequence of the Truly panel requires the panel to
+	 * be out of reset for 10 ms, then held in reset for 10 ms, and
+	 * finally released again.
+	 */
+	gpiod_set_value(ctx->reset_gpio, 0);
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->reset_gpio, 1);
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->reset_gpio, 0);
+
+	return 0;
+}
+
+static int truly_nt35597_power_off(struct truly_nt35597 *ctx)
+{
+	int ret = 0;
+	int i;
+
+	gpiod_set_value(ctx->reset_gpio, 1);
+
+	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+		ret = regulator_set_load(ctx->supplies[i].consumer,
+				regulator_disable_loads[i]);
+		if (ret) {
+			DRM_DEV_ERROR(ctx->dev,
+				"regulator_set_load failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret) {
+		DRM_DEV_ERROR(ctx->dev,
+			"regulator_bulk_disable failed %d\n", ret);
+	}
+	return ret;
+}
+
+static int truly_nt35597_disable(struct drm_panel *panel)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int ret;
+
+	if (!ctx->enabled)
+		return 0;
+
+	if (ctx->backlight) {
+		ret = backlight_disable(ctx->backlight);
+		if (ret < 0)
+			DRM_DEV_ERROR(ctx->dev, "backlight disable failed %d\n",
+				ret);
+	}
+
+	ctx->enabled = false;
+	return 0;
+}
+
+static int truly_nt35597_unprepare(struct drm_panel *panel)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int ret = 0;
+
+	if (!ctx->prepared)
+		return 0;
+
+	ctx->dsi[0]->mode_flags = 0;
+	ctx->dsi[1]->mode_flags = 0;
+
+	ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_OFF);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			"set_display_off cmd failed ret = %d\n",
+			ret);
+	}
+
+	/* 120ms delay required here as per DCS spec */
+	msleep(120);
+
+	ret = truly_dcs_write(panel, MIPI_DCS_ENTER_SLEEP_MODE);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			"enter_sleep cmd failed ret = %d\n", ret);
+	}
+
+	ret = truly_nt35597_power_off(ctx);
+	if (ret < 0)
+		DRM_DEV_ERROR(ctx->dev, "power_off failed ret = %d\n", ret);
+
+	ctx->prepared = false;
+	return ret;
+}
+
+static int truly_nt35597_prepare(struct drm_panel *panel)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int ret;
+	int i;
+	const struct cmd_set *panel_on_cmds;
+	const struct nt35597_config *config;
+	u32 num_cmds;
+
+	if (ctx->prepared)
+		return 0;
+
+	ret = truly_35597_power_on(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+	ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	config = ctx->config;
+	panel_on_cmds = config->panel_on_cmds;
+	num_cmds = config->num_on_cmds;
+
+	for (i = 0; i < num_cmds; i++) {
+		ret = truly_dcs_write_buf(panel, panel_on_cmds[i].size,
+					  panel_on_cmds[i].commands);
+		if (ret < 0) {
+			DRM_DEV_ERROR(ctx->dev,
+				"cmd set tx failed i = %d ret = %d\n",
+				i, ret);
+			goto power_off;
+		}
+	}
+
+	ret = truly_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			"exit_sleep_mode cmd failed ret = %d\n",
+			ret);
+		goto power_off;
+	}
+
+	/* Per DSI spec wait 120ms after sending exit sleep DCS command */
+	msleep(120);
+
+	ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_ON);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			"set_display_on cmd failed ret = %d\n", ret);
+		goto power_off;
+	}
+
+	/* Per DSI spec wait 120ms after sending set_display_on DCS command */
+	msleep(120);
+
+	ctx->prepared = true;
+
+	return 0;
+
+power_off:
+	if (truly_nt35597_power_off(ctx))
+		DRM_DEV_ERROR(ctx->dev, "power_off failed\n");
+	return ret;
+}
+
+static int truly_nt35597_enable(struct drm_panel *panel)
+{
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	int ret;
+
+	if (ctx->enabled)
+		return 0;
+
+	if (ctx->backlight) {
+		ret = backlight_enable(ctx->backlight);
+		if (ret < 0)
+			DRM_DEV_ERROR(ctx->dev, "backlight enable failed %d\n",
+						  ret);
+	}
+
+	ctx->enabled = true;
+
+	return 0;
+}
+
+static int truly_nt35597_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct truly_nt35597 *ctx = panel_to_ctx(panel);
+	struct drm_display_mode *mode;
+	const struct nt35597_config *config;
+
+	config = ctx->config;
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_DEV_ERROR(ctx->dev,
+			"failed to create a new display mode\n");
+		return 0;
+	}
+
+	connector->display_info.width_mm = config->width_mm;
+	connector->display_info.height_mm = config->height_mm;
+	drm_mode_copy(mode, config->dm);
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs truly_nt35597_drm_funcs = {
+	.disable = truly_nt35597_disable,
+	.unprepare = truly_nt35597_unprepare,
+	.prepare = truly_nt35597_prepare,
+	.enable = truly_nt35597_enable,
+	.get_modes = truly_nt35597_get_modes,
+};
+
+static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
+{
+	struct device *dev = ctx->dev;
+	int ret, i;
+	const struct nt35597_config *config;
+
+	config = ctx->config;
+	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+		ctx->supplies[i].supply = regulator_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset_gpio)) {
+		DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+			PTR_ERR(ctx->reset_gpio));
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->mode_gpio = devm_gpiod_get(dev, "mode", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->mode_gpio)) {
+		DRM_DEV_ERROR(dev, "cannot get mode gpio %ld\n",
+			PTR_ERR(ctx->mode_gpio));
+		return PTR_ERR(ctx->mode_gpio);
+	}
+
+	/* dual port */
+	gpiod_set_value(ctx->mode_gpio, 0);
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = dev;
+	ctx->panel.funcs = &truly_nt35597_drm_funcs;
+	drm_panel_add(&ctx->panel);
+
+	return 0;
+}
+
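+/*
+ * Pixel clock in kHz: htotal * vtotal * vrefresh / 1000
+ * = 1736 * 2576 * 60 / 1000 ~= 268316.
+ */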
+static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = {
+	.name = "1440x2560",
+	.clock = 268316,
+	.hdisplay = 1440,
+	.hsync_start = 1440 + 200,
+	.hsync_end = 1440 + 200 + 32,
+	.htotal = 1440 + 200 + 32 + 64,
+	.vdisplay = 2560,
+	.vsync_start = 2560 + 8,
+	.vsync_end = 2560 + 8 + 1,
+	.vtotal = 2560 + 8 + 1 + 7,
+	.vrefresh = 60,
+	.flags = 0,
+};
+
+static const struct nt35597_config nt35597_dir = {
+	.width_mm = 74,
+	.height_mm = 131,
+	.panel_name = "qcom_sdm845_mtp_2k_panel",
+	.dm = &qcom_sdm845_mtp_2k_mode,
+	.panel_on_cmds = qcom_2k_panel_magic_cmds,
+	.num_on_cmds = ARRAY_SIZE(qcom_2k_panel_magic_cmds),
+};
+
+static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct truly_nt35597 *ctx;
+	struct mipi_dsi_device *dsi1_device;
+	struct device_node *dsi1;
+	struct mipi_dsi_host *dsi1_host;
+	struct mipi_dsi_device *dsi_dev;
+	int ret = 0;
+	int i;
+
+	const struct mipi_dsi_device_info info = {
+		.type = "trulynt35597",
+		.channel = 0,
+		.node = NULL,
+	};
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+
+	if (!ctx)
+		return -ENOMEM;
+
+	/*
+	 * This device represents itself as one with two input ports which
+	 * are fed by the output ports of the two DSI controllers. DSI0 is
+	 * the master controller and has most of the panel-related info in
+	 * its child node.
+	 */
+
+	ctx->config = of_device_get_match_data(dev);
+
+	if (!ctx->config) {
+		dev_err(dev, "missing device configuration\n");
+		return -ENODEV;
+	}
+
+	dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+	if (!dsi1) {
+		DRM_DEV_ERROR(dev,
+			"failed to get remote node for dsi1_device\n");
+		return -ENODEV;
+	}
+
+	dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
+	of_node_put(dsi1);
+	if (!dsi1_host) {
+		DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+		return -EPROBE_DEFER;
+	}
+
+	/* register the second DSI device */
+	dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
+	if (IS_ERR(dsi1_device)) {
+		DRM_DEV_ERROR(dev, "failed to create dsi device\n");
+		return PTR_ERR(dsi1_device);
+	}
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+	ctx->dsi[0] = dsi;
+	ctx->dsi[1] = dsi1_device;
+
+	ret = truly_nt35597_panel_add(ctx);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to add panel\n");
+		goto err_panel_add;
+	}
+
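+	/* Both DSI links drive the same panel; configure them identically. */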
+	for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+		dsi_dev = ctx->dsi[i];
+		dsi_dev->lanes = 4;
+		dsi_dev->format = MIPI_DSI_FMT_RGB888;
+		dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS;
+		ret = mipi_dsi_attach(dsi_dev);
+		if (ret < 0) {
+			DRM_DEV_ERROR(dev,
+				"dsi attach failed i = %d\n", i);
+			goto err_dsi_attach;
+		}
+	}
+
+	return 0;
+
+err_dsi_attach:
+	drm_panel_remove(&ctx->panel);
+err_panel_add:
+	mipi_dsi_device_unregister(dsi1_device);
+	return ret;
+}
+
+static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
+{
+	struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);
+
+	if (ctx->dsi[0])
+		mipi_dsi_detach(ctx->dsi[0]);
+	if (ctx->dsi[1]) {
+		mipi_dsi_detach(ctx->dsi[1]);
+		mipi_dsi_device_unregister(ctx->dsi[1]);
+	}
+
+	drm_panel_remove(&ctx->panel);
+	return 0;
+}
+
+static const struct of_device_id truly_nt35597_of_match[] = {
+	{
+		.compatible = "truly,nt35597-2K-display",
+		.data = &nt35597_dir,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, truly_nt35597_of_match);
+
+static struct mipi_dsi_driver truly_nt35597_driver = {
+	.driver = {
+		.name = "panel-truly-nt35597",
+		.of_match_table = truly_nt35597_of_match,
+	},
+	.probe = truly_nt35597_probe,
+	.remove = truly_nt35597_remove,
+};
+module_mipi_dsi_driver(truly_nt35597_driver);
+
+MODULE_DESCRIPTION("Truly NT35597 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 5fa0441..38c938c 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
 		}
 	}
 
+	of_node_put(root);
+
 	/*
 	 * If there is a coretile HDLCD and it has a driver,
 	 * do not mux the CLCD on the motherboard to the DVI.
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 208af9f..dffc509 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -84,6 +84,7 @@ static int qxl_check_header(struct qxl_ring *ring)
 	int ret;
 	struct qxl_ring_header *header = &(ring->ring->header);
 	unsigned long flags;
+
 	spin_lock_irqsave(&ring->lock, flags);
 	ret = header->prod - header->cons < header->num_items;
 	if (ret == 0)
@@ -97,6 +98,7 @@ int qxl_check_idle(struct qxl_ring *ring)
 	int ret;
 	struct qxl_ring_header *header = &(ring->ring->header);
 	unsigned long flags;
+
 	spin_lock_irqsave(&ring->lock, flags);
 	ret = header->prod == header->cons;
 	spin_unlock_irqrestore(&ring->lock, flags);
@@ -110,6 +112,7 @@ int qxl_ring_push(struct qxl_ring *ring,
 	uint8_t *elt;
 	int idx, ret;
 	unsigned long flags;
+
 	spin_lock_irqsave(&ring->lock, flags);
 	if (header->prod - header->cons == header->num_items) {
 		header->notify_on_cons = header->cons + 1;
@@ -156,6 +159,7 @@ static bool qxl_ring_pop(struct qxl_ring *ring,
 	volatile uint8_t *ring_elt;
 	int idx;
 	unsigned long flags;
+
 	spin_lock_irqsave(&ring->lock, flags);
 	if (header->cons == header->prod) {
 		header->notify_on_prod = header->cons + 1;
@@ -365,7 +369,6 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
 	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
 }
 
-
 void qxl_io_destroy_primary(struct qxl_device *qdev)
 {
 	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
@@ -373,7 +376,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
 }
 
 void qxl_io_create_primary(struct qxl_device *qdev,
-			   unsigned offset, struct qxl_bo *bo)
+			   unsigned int offset, struct qxl_bo *bo)
 {
 	struct qxl_surface_create *create;
 
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 15c8406..1184225 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -34,7 +34,6 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-
 #if defined(CONFIG_DEBUG_FS)
 static int
 qxl_debugfs_irq_received(struct seq_file *m, void *data)
@@ -102,9 +101,9 @@ qxl_debugfs_init(struct drm_minor *minor)
 
 int qxl_debugfs_add_files(struct qxl_device *qdev,
 			  struct drm_info_list *files,
-			  unsigned nfiles)
+			  unsigned int nfiles)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < qdev->debugfs_count; i++) {
 		if (qdev->debugfs[i].files == files) {
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index 94c5aec..a0ee416 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -28,7 +28,6 @@
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-
 #ifndef H_QXL_DEV
 #define H_QXL_DEV
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 87d16a0..ce0b9c4 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -253,12 +253,13 @@ static struct mode_size {
 };
 
 static int qxl_add_common_modes(struct drm_connector *connector,
-                                unsigned pwidth,
-                                unsigned pheight)
+                                unsigned int pwidth,
+                                unsigned int pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode = NULL;
 	int i;
+
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
 				    60, false, false, false);
@@ -315,6 +316,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
 	oldcount = qdev->monitors_config->count;
 	if (crtc->state->active) {
 		struct drm_display_mode *mode = &crtc->mode;
+
 		head.width = mode->hdisplay;
 		head.height = mode->vdisplay;
 		head.x = crtc->x;
@@ -391,9 +393,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
 
 static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
 					 struct drm_file *file_priv,
-					 unsigned flags, unsigned color,
+					 unsigned int flags, unsigned int color,
 					 struct drm_clip_rect *clips,
-					 unsigned num_clips)
+					 unsigned int num_clips)
 {
 	/* TODO: vmwgfx where this was cribbed from had locking. Why? */
 	struct qxl_device *qdev = fb->dev->dev_private;
@@ -620,10 +622,14 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		if (ret)
 			goto out_kunmap;
 
-		ret = qxl_release_reserve_list(release, true);
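+		/*
+		 * Keep the new cursor BO pinned while the hardware may
+		 * reference it; the previous cursor BO is unpinned below
+		 * once the update has been pushed.
+		 */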
+		ret = qxl_bo_pin(cursor_bo);
 		if (ret)
 			goto out_free_bo;
 
+		ret = qxl_release_reserve_list(release, true);
+		if (ret)
+			goto out_unpin;
+
 		ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
 		if (ret)
 			goto out_backoff;
@@ -668,15 +674,17 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
 
-	if (old_cursor_bo)
-		qxl_bo_unref(&old_cursor_bo);
-
+	if (old_cursor_bo != NULL)
+		qxl_bo_unpin(old_cursor_bo);
+	qxl_bo_unref(&old_cursor_bo);
 	qxl_bo_unref(&cursor_bo);
 
 	return;
 
 out_backoff:
 	qxl_release_backoff_reserve_list(release);
+out_unpin:
+	qxl_bo_unpin(cursor_bo);
 out_free_bo:
 	qxl_bo_unref(&cursor_bo);
 out_kunmap:
@@ -755,7 +763,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
 		}
 	}
 
-	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+	ret = qxl_bo_pin(user_bo);
 	if (ret)
 		return ret;
 
@@ -917,8 +925,8 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
 
 static int qxl_conn_get_modes(struct drm_connector *connector)
 {
-	unsigned pwidth = 1024;
-	unsigned pheight = 768;
+	unsigned int pwidth = 1024;
+	unsigned int pheight = 768;
 	int ret = 0;
 
 	ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
@@ -938,8 +946,8 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
 	/* TODO: is this called for user defined modes? (xrandr --add-mode)
 	 * TODO: check that the mode fits in the framebuffer */
 
-	if(qdev->monitors_config_width == mode->hdisplay &&
-	   qdev->monitors_config_height == mode->vdisplay)
+	if (qdev->monitors_config_width == mode->hdisplay &&
+	    qdev->monitors_config_height == mode->vdisplay)
 		return MODE_OK;
 
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
@@ -958,7 +966,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
 	return &qxl_output->enc;
 }
 
-
 static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
 };
 
@@ -1103,7 +1110,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
 	}
 	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
 
-	ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
+	ret = qxl_bo_pin(qdev->monitors_config_bo);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index cc5b32e..c408bb8 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -25,7 +25,7 @@
 
 static int alloc_clips(struct qxl_device *qdev,
 		       struct qxl_release *release,
-		       unsigned num_clips,
+		       unsigned int num_clips,
 		       struct qxl_bo **clips_bo)
 {
 	int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
@@ -37,7 +37,7 @@ static int alloc_clips(struct qxl_device *qdev,
  * the qxl_clip_rects. This is *not* the same as the memory allocated
  * on the device, it is offset to qxl_clip_rects.chunk.data */
 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
-					      unsigned num_clips,
+					      unsigned int num_clips,
 					      struct qxl_bo *clips_bo)
 {
 	struct qxl_clip_rects *dev_clips;
@@ -168,6 +168,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 	int ret;
 	struct qxl_drm_image *dimage;
 	struct qxl_bo *palette_bo = NULL;
+
 	if (stride == 0)
 		stride = depth * width / 8;
 
@@ -214,6 +215,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 
 	if (depth == 1) {
 		void *ptr;
+
 		ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
 
 		ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
@@ -245,8 +247,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 	qxl_release_fence_buffer_objects(release);
 
 out_free_palette:
-	if (palette_bo)
-		qxl_bo_unref(&palette_bo);
+	qxl_bo_unref(&palette_bo);
 out_free_image:
 	qxl_image_free_objects(qdev, dimage);
 out_free_drawable:
@@ -264,9 +265,9 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 void qxl_draw_dirty_fb(struct qxl_device *qdev,
 		       struct drm_framebuffer *fb,
 		       struct qxl_bo *bo,
-		       unsigned flags, unsigned color,
+		       unsigned int flags, unsigned int color,
 		       struct drm_clip_rect *clips,
-		       unsigned num_clips, int inc)
+		       unsigned int num_clips, int inc)
 {
 	/*
 	 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
@@ -340,7 +341,6 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	if (ret)
 		goto out_release_backoff;
 
-
 	ret = qxl_image_init(qdev, release, dimage, surface_base,
 			     left, top, width, height, depth, stride);
 	qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8ff70a7..13a0254 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -23,7 +23,6 @@
  *          Alon Levy
  */
 
-
 #ifndef QXL_DRV_H
 #define QXL_DRV_H
 
@@ -83,16 +82,16 @@ struct qxl_bo {
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
-	unsigned			pin_count;
+	unsigned int pin_count;
 	void				*kptr;
 	int                             type;
 
 	/* Constant after initialization */
 	struct drm_gem_object		gem_base;
-	bool is_primary; /* is this now a primary surface */
-	bool is_dumb;
+	unsigned int is_primary:1; /* is this now a primary surface */
+	unsigned int is_dumb:1;
 	struct qxl_bo *shadow;
-	bool hw_surf_alloc;
+	unsigned int hw_surf_alloc:1;
 	struct qxl_surface surf;
 	uint32_t surface_id;
 	struct qxl_release *surf_create;
@@ -127,13 +126,9 @@ struct qxl_output {
 #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
 
 struct qxl_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	bool				mem_global_referenced;
 	struct ttm_bo_device		bdev;
 };
 
-
 struct qxl_memslot {
 	uint8_t		generation;
 	uint64_t	start_phys_addr;
@@ -191,12 +186,12 @@ struct qxl_draw_fill {
  */
 struct qxl_debugfs {
 	struct drm_info_list	*files;
-	unsigned		num_files;
+	unsigned int num_files;
 };
 
 int qxl_debugfs_add_files(struct qxl_device *rdev,
 			     struct drm_info_list *files,
-			     unsigned nfiles);
+			     unsigned int nfiles);
 int qxl_debugfs_fence_init(struct qxl_device *rdev);
 
 struct qxl_device;
@@ -231,7 +226,7 @@ struct qxl_device {
 
 	struct qxl_ram_header *ram_header;
 
-	bool primary_created;
+	unsigned int primary_created:1;
 
 	struct qxl_memslot	*mem_slots;
 	uint8_t		n_mem_slots;
@@ -254,7 +249,7 @@ struct qxl_device {
 	atomic_t irq_received_display;
 	atomic_t irq_received_cursor;
 	atomic_t irq_received_io_cmd;
-	unsigned irq_received_error;
+	unsigned int irq_received_error;
 	wait_queue_head_t display_event;
 	wait_queue_head_t cursor_event;
 	wait_queue_head_t io_cmd_event;
@@ -262,7 +257,7 @@ struct qxl_device {
 
 	/* debugfs */
 	struct qxl_debugfs	debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
-	unsigned		debugfs_count;
+	unsigned int debugfs_count;
 
 	struct mutex		update_area_mutex;
 
@@ -372,7 +367,6 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
 		       struct drm_device *dev,
 		       uint32_t handle, uint64_t *offset_p);
 
-
 /* qxl ttm */
 int qxl_ttm_init(struct qxl_device *qdev);
 void qxl_ttm_fini(struct qxl_device *qdev);
@@ -398,7 +392,7 @@ void qxl_update_screen(struct qxl_device *qxl);
 /* qxl io operations (qxl_cmd.c) */
 
 void qxl_io_create_primary(struct qxl_device *qdev,
-			   unsigned offset,
+			   unsigned int offset,
 			   struct qxl_bo *bo);
 void qxl_io_destroy_primary(struct qxl_device *qdev);
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@ -449,9 +443,9 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 void qxl_draw_dirty_fb(struct qxl_device *qdev,
 		       struct drm_framebuffer *fb,
 		       struct qxl_bo *bo,
-		       unsigned flags, unsigned color,
+		       unsigned int flags, unsigned int color,
 		       struct drm_clip_rect *clips,
-		       unsigned num_clips, int inc);
+		       unsigned int num_clips, int inc);
 
 void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
 
@@ -496,7 +490,7 @@ bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
 
 int qxl_debugfs_add_files(struct qxl_device *qdev,
 			  struct drm_info_list *files,
-			  unsigned nfiles);
+			  unsigned int nfiles);
 
 int qxl_surface_id_alloc(struct qxl_device *qdev,
 			 struct qxl_bo *surf);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index c666b89..e3765739 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -38,6 +38,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 	int r;
 	struct qxl_surface surf;
 	uint32_t pitch, format;
+
 	pitch = args->width * ((args->bpp + 1) / 8);
 	args->size = pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
@@ -52,7 +53,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 	default:
 		return -EINVAL;
 	}
-	  
+
 	surf.width = args->width;
 	surf.height = args->height;
 	surf.stride = pitch;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2294b7f..a819d24 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -111,7 +111,7 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
 	qbo->surf.stride = mode_cmd->pitches[0];
 	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
 
-	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+	ret = qxl_bo_pin(qbo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -134,9 +134,9 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
  */
 static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
 				   struct drm_file *file_priv,
-				   unsigned flags, unsigned color,
+				   unsigned int flags, unsigned int color,
 				   struct drm_clip_rect *clips,
-				   unsigned num_clips)
+				   unsigned int num_clips)
 {
 	struct qxl_device *qdev = fb->dev->dev_private;
 	struct fb_info *info = qdev->fb_helper.fbdev;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 7fbcc35..43688ec 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -136,6 +136,7 @@ qxl_image_init_helper(struct qxl_device *qdev,
 		int remain;
 		int page;
 		int size;
+
 		if (stride == linesize && chunk_stride == stride) {
 			remain = linesize * height;
 			page = 0;
@@ -162,7 +163,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
 				page++;
 			}
 		} else {
-			unsigned page_base, page_offset, out_offset;
+			unsigned int page_base, page_offset, out_offset;
+
 			for (i = 0 ; i < height ; ++i) {
 				i_data = (void *)data + i * stride;
 				remain = linesize;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cc9f33..6e828158 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -85,6 +85,7 @@ static void
 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
 {
 	void *reloc_page;
+
 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
 	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
 											      info->src_bo,
@@ -189,6 +190,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 
 	{
 		struct qxl_drawable *draw = fb_cmd;
+
 		draw->mm_time = qdev->rom->mm_clock;
 	}
 
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e25c589..15238a4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -92,6 +92,7 @@ void qxl_reinit_memslots(struct qxl_device *qdev)
 static void qxl_gc_work(struct work_struct *work)
 {
 	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+
 	qxl_garbage_collect(qdev);
 }
 
@@ -284,7 +285,6 @@ int qxl_device_init(struct qxl_device *qdev,
 		 (unsigned long)qdev->surfaceram_base,
 		 (unsigned long)qdev->surfaceram_size);
 
-
 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
 	return 0;
@@ -313,10 +313,8 @@ int qxl_device_init(struct qxl_device *qdev,
 
 void qxl_device_fini(struct qxl_device *qdev)
 {
-	if (qdev->current_release_bo[0])
-		qxl_bo_unref(&qdev->current_release_bo[0]);
-	if (qdev->current_release_bo[1])
-		qxl_bo_unref(&qdev->current_release_bo[1]);
+	qxl_bo_unref(&qdev->current_release_bo[0]);
+	qxl_bo_unref(&qdev->current_release_bo[1]);
 	flush_work(&qdev->gc_work);
 	qxl_ring_free(qdev->command_ring);
 	qxl_ring_free(qdev->cursor_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6a30196..91f3bbc 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,7 +54,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 {
 	u32 c = 0;
 	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
-	unsigned i;
+	unsigned int i;
 
 	qbo->placement.placement = qbo->placements;
 	qbo->placement.busy_placement = qbo->placements;
@@ -74,7 +74,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 	}
 }
 
-
 int qxl_bo_create(struct qxl_device *qdev,
 		  unsigned long size, bool kernel, bool pinned, u32 domain,
 		  struct qxl_surface *surf,
@@ -187,13 +186,9 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
 	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-	struct io_mapping *map;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
-		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
-		map = qdev->surface_mapping;
-	else
+	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
 		goto fallback;
 
 	io_mapping_unmap_atomic(pmap);
@@ -201,7 +196,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
 	ttm_mem_io_unlock(man);
-	return ;
+	return;
  fallback:
 	qxl_bo_kunmap(bo);
 }
@@ -221,7 +216,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 	return bo;
 }
 
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct drm_device *ddev = bo->gem_base.dev;
@@ -229,16 +224,12 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 
 	if (bo->pin_count) {
 		bo->pin_count++;
-		if (gpu_addr)
-			*gpu_addr = qxl_bo_gpu_offset(bo);
 		return 0;
 	}
-	qxl_ttm_placement_from_domain(bo, domain, true);
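+	/* Pin the BO into the domain it was created for (bo->type). */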
+	qxl_ttm_placement_from_domain(bo, bo->type, true);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
-		if (gpu_addr != NULL)
-			*gpu_addr = qxl_bo_gpu_offset(bo);
 	}
 	if (unlikely(r != 0))
 		dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -266,13 +257,12 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
 	return r;
 }
 
-
 /*
  * Reserve the BO before pinning the object.  If the BO was reserved
  * beforehand, use the internal version directly __qxl_bo_pin.
  *
  */
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
 {
 	int r;
 
@@ -280,7 +270,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 	if (r)
 		return r;
 
-	r = __qxl_bo_pin(bo, bo->type, NULL);
+	r = __qxl_bo_pin(bo);
 	qxl_bo_unreserve(bo);
 	return r;
 }
@@ -335,6 +325,7 @@ void qxl_bo_fini(struct qxl_device *qdev)
 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 {
 	int ret;
+
 	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
 		/* allocate a surface id for this surface now */
 		ret = qxl_surface_id_alloc(qdev, bo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 0374fd9..255b914 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -35,6 +35,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct drm_device *ddev = bo->gem_base.dev;
+
 			dev_err(ddev->dev, "%p reserve failed\n", bo);
 		}
 		return r;
@@ -71,6 +72,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct drm_device *ddev = bo->gem_base.dev;
+
 			dev_err(ddev->dev, "%p reserve failed for wait\n",
 				bo);
 		}
@@ -95,7 +97,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
 extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
 extern void qxl_bo_unref(struct qxl_bo **bo);
-extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_pin(struct qxl_bo *bo);
 extern int qxl_bo_unpin(struct qxl_bo *bo);
 extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
 extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 9f029dd..a55dece 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,7 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
 	WARN_ONCE(1, "not implemented");
 }
 
-
 struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e37f0097..30f85f0 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 	qxl_bo_ref(bo);
 	entry->tv.bo = &bo->tbo;
-	entry->tv.shared = false;
+	entry->tv.num_shared = 0;
 	list_add_tail(&entry->tv.head, &release->bos);
 	return 0;
 }
@@ -234,7 +234,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
 			return ret;
 	}
 
-	ret = reservation_object_reserve_shared(bo->tbo.resv);
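+	/* Reserve one shared fence slot; the API now takes a slot count. */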
+	ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
 	if (ret)
 		return ret;
 
@@ -282,7 +282,6 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
 	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
 }
 
-
 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 				       enum qxl_surface_cmd_type surface_cmd_type,
 				       struct qxl_release *create_rel,
@@ -428,8 +427,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	struct ttm_buffer_object *bo;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
-	struct ttm_bo_driver *driver;
-	struct qxl_bo *qbo;
 	struct ttm_validate_buffer *entry;
 	struct qxl_device *qdev;
 
@@ -450,14 +447,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		       release->id | 0xf0000000, release->base.seqno);
 	trace_dma_fence_emit(&release->base);
 
-	driver = bdev->driver;
 	glob = bdev->glob;
 
 	spin_lock(&glob->lru_lock);
 
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
-		qbo = to_qxl_bo(bo);
 
 		reservation_object_add_shared_fence(bo->resv, &release->base);
 		ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 86a1fb3..886f61e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
 	return qdev;
 }
 
-static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int qxl_ttm_global_init(struct qxl_device *qdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	qdev->mman.mem_global_referenced = false;
-	global_ref = &qdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &qxl_ttm_mem_global_init;
-	global_ref->release = &qxl_ttm_mem_global_release;
-
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	qdev->mman.bo_global_ref.mem_glob =
-		qdev->mman.mem_global_ref.object;
-	global_ref = &qdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&qdev->mman.mem_global_ref);
-		return r;
-	}
-
-	qdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void qxl_ttm_global_fini(struct qxl_device *qdev)
-{
-	if (qdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&qdev->mman.mem_global_ref);
-		qdev->mman.mem_global_referenced = false;
-	}
-}
-
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
@@ -174,7 +118,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	default:
-		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
 		return -EINVAL;
 	}
 	return 0;
@@ -331,7 +275,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (ret)
 		return ret;
 
-
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		qxl_move_null(bo, new_mem);
 		return 0;
@@ -373,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	int r;
 	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
-	r = qxl_ttm_global_init(qdev);
-	if (r)
-		return r;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&qdev->mman.bdev,
-			       qdev->mman.bo_global_ref.ref.object,
 			       &qxl_bo_driver,
 			       qdev->ddev.anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET, 0);
@@ -401,11 +340,11 @@ int qxl_ttm_init(struct qxl_device *qdev)
 		return r;
 	}
 	DRM_INFO("qxl: %uM of VRAM memory size\n",
-		 (unsigned)qdev->vram_size / (1024 * 1024));
+		 (unsigned int)qdev->vram_size / (1024 * 1024));
 	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
-		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+		 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
 	DRM_INFO("qxl: %uM of Surface memory size\n",
-		 (unsigned)qdev->surfaceram_size / (1024 * 1024));
+		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
 	return 0;
 }
 
@@ -414,11 +353,9 @@ void qxl_ttm_fini(struct qxl_device *qdev)
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
-	qxl_ttm_global_fini(qdev);
 	DRM_INFO("qxl: ttm finalized\n");
 }
 
-
 #define QXL_DEBUGFS_MEM_TYPES 2
 
 #if defined(CONFIG_DEBUG_FS)
@@ -443,7 +380,7 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
 #if defined(CONFIG_DEBUG_FS)
 	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
 	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
 		if (i == 0)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 21161aa..652126f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 					  ((idx_value >> 21) & 0xF));
 				return -EINVAL;
 			}
-			/* Pass through. */
+			/* Fall through. */
 		case 6:
 			track->cb[i].cpp = 4;
 			break;
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				return -EINVAL;
 			}
 			/* The same rules apply as for DXT3/5. */
-			/* Pass through. */
+			/* Fall through. */
 		case R300_TX_FORMAT_DXT3:
 		case R300_TX_FORMAT_DXT5:
 			track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 45e1d4e..2318d9e3 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev)
 	default:
 		/* force to 1 pipe */
 		num_pipes = 1;
+		/* fall through */
 	case 1:
 		tmp = (0 << 1);
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1a6f6ed..32808e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -448,10 +448,7 @@ struct radeon_surface_reg {
  * TTM.
  */
 struct radeon_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
 	struct ttm_bo_device		bdev;
-	bool				mem_global_referenced;
 	bool				initialized;
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1ae31dbc..f433053 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
-		p->relocs[i].tv.shared = !r->write_domain;
+		p->relocs[i].tv.num_shared = !r->write_domain;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 
 		resv = reloc->robj->tbo.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-				     reloc->tv.shared);
+				     reloc->tv.num_shared);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 27d8e7d..44617de 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&list);
 
 	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
+	tv.num_shared = 1;
 	list_add(&tv.head, &list);
 
 	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 4278272..3dae2c4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode
 
 static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-	struct radeon_crtc *radeon_crtc;
 	int restart;
 	unsigned int h_total, v_total, f_total;
 	int v_offset, h_offset;
 	u16 p1, p2, h_inc;
 	bool h_changed;
 	const struct radeon_tv_mode_constants *const_ptr;
-	struct radeon_pll *pll;
-
-	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
-	if (radeon_crtc->crtc_id == 1)
-		pll = &rdev->clock.p2pll;
-	else
-		pll = &rdev->clock.p1pll;
 
 	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
 	if (!const_ptr)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 92f6d40..833e909 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 void radeon_bo_unref(struct radeon_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
-	struct radeon_device *rdev;
 
 	if ((*bo) == NULL)
 		return;
-	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
 	ttm_bo_put(tbo);
 	*bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbb67e9..9920a6f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 	return rdev;
 }
 
-
-/*
- * Global memory.
- */
-static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int radeon_ttm_global_init(struct radeon_device *rdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	rdev->mman.mem_global_referenced = false;
-	global_ref = &rdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &radeon_ttm_mem_global_init;
-	global_ref->release = &radeon_ttm_mem_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	rdev->mman.bo_global_ref.mem_glob =
-		rdev->mman.mem_global_ref.object;
-	global_ref = &rdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&rdev->mman.mem_global_ref);
-		return r;
-	}
-
-	rdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void radeon_ttm_global_fini(struct radeon_device *rdev)
-{
-	if (rdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&rdev->mman.mem_global_ref);
-		rdev->mman.mem_global_referenced = false;
-	}
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_ttm_global_init(rdev);
-	if (r) {
-		return r;
-	}
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
-			       rdev->mman.bo_global_ref.ref.object,
 			       &radeon_bo_driver,
 			       rdev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET,
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
 	ttm_bo_device_release(&rdev->mman.bdev);
 	radeon_gart_fini(rdev);
-	radeon_ttm_global_fini(rdev);
 	rdev->mman.initialized = false;
 	DRM_INFO("radeon: ttm finalized\n");
 }
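
The deleted radeon_ttm_global_init()/radeon_ttm_global_fini() pair (and the matching qxl code above) is boilerplate every TTM driver used to carry; with this series TTM owns its global BO and memory-accounting state itself, so a driver only makes the one init call. A sketch of the slimmed-down sequence, assuming the 4.21-era TTM API:

	/* The global_init/global_fini dance is gone; one call remains. */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r)
		return r;
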
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 7f1a9c7..0d37421 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
-	list[0].tv.shared = true;
+	list[0].tv.num_shared = 1;
 	list[0].tiling_flags = 0;
 	list_add(&list[0].tv.head, head);
 
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
-		list[idx].tv.shared = true;
+		list[idx].tv.num_shared = 1;
 		list[idx].tiling_flags = 0;
 		list_add(&list[idx++].tv.head, head);
 	}
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		int r;
 
 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
-		r = reservation_object_reserve_shared(pt->tbo.resv);
+		r = reservation_object_reserve_shared(pt->tbo.resv, 1);
 		if (r)
 			return r;
 
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
+		addr = (u64)mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
 		}
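
The (u64) cast before the shift fixes a classic truncation: mem->start holds a page-frame offset in an unsigned long, and shifting it left by PAGE_SHIFT in 32-bit arithmetic wraps for offsets at or above 4GB. A worked example with hypothetical values, assuming a 32-bit unsigned long and PAGE_SHIFT == 12:

	unsigned long start = 0x100000;	/* page offset at the 4GB mark */
	u64 wrong = start << 12;	/* wraps to 0 before widening */
	u64 right = (u64)start << 12;	/* 0x100000000, as intended */
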
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 1774184..90dacab 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
 		 * system clock, and have no internal clock divider.
 		 */
 
-		if (WARN_ON(!rcrtc->extclock))
-			return;
-
 		/*
 		 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
 		 * We can work around them by configuring the DPLL to twice the
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
 	 * CRTC will be put later in .atomic_disable().
 	 *
 	 * If a mode set is not in progress the CRTC is enabled, and the
-	 * following get call will be a no-op. There is thus no need to belance
+	 * following get call will be a no-op. There is thus no need to balance
 	 * it in .atomic_flush() either.
 	 */
 	rcar_du_crtc_get(rcrtc);
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 	struct rcar_du_device *rcdu = rcrtc->group->dev;
 	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+	unsigned int vbp;
 
 	if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
 		return MODE_NO_INTERLACE;
 
+	/*
+	 * The hardware requires a minimum combined horizontal sync and back
+	 * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+	 */
+	if (mode->htotal - mode->hsync_start < 20)
+		return MODE_HBLANK_NARROW;
+
+	vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
+	if (vbp < 3)
+		return MODE_VBLANK_NARROW;
+
 	return MODE_OK;
 }
 
@@ -1002,7 +1011,7 @@ static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
-	return 0;
+	return ret;
 }
 
 static const struct drm_crtc_funcs crtc_funcs_gen2 = {
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
 	clk = devm_clk_get(rcdu->dev, clk_name);
 	if (!IS_ERR(clk)) {
 		rcrtc->extclock = clk;
-	} else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
-		dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
+	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
 		return -EPROBE_DEFER;
+	} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
+		/*
+		 * DU channels that have a display PLL can't use the internal
+		 * system clock and thus require an external clock.
+		 */
+		ret = PTR_ERR(clk);
+		dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
+		return ret;
 	}
 
 	init_waitqueue_head(&rcrtc->flip_wait);
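
The new .mode_valid() blanking checks reject modes the hardware cannot latch. A worked example against hypothetical CEA 1080p timings (hsync_start = 2008, htotal = 2200, vsync_end = 1089, vtotal = 1125):

	unsigned int hsbp = 2200 - 2008;	/* 192 >= 20: OK */
	unsigned int vbp  = (1125 - 1089) / 2;	/* 18 >= 3: OK (interlaced) */
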
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 084f58d..f50a3b1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
 #include "rcar_du_drv.h"
@@ -41,7 +42,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
 	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
-		 * R8A7743 has one RGB output and one LVDS output
+		 * R8A774[34] has one RGB output and one LVDS output
 		 */
 		[RCAR_DU_OUTPUT_DPAD0] = {
 			.possible_crtcs = BIT(1) | BIT(0),
@@ -77,6 +78,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
 	},
 };
 
+static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
+	.gen = 2,
+	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
+		  | RCAR_DU_FEATURE_INTERLACED
+		  | RCAR_DU_FEATURE_TVM_SYNC,
+	.channels_mask = BIT(1) | BIT(0),
+	.routes = {
+		/*
+		 * R8A77470 has two RGB outputs, one LVDS output, and
+		 * one (currently unsupported) analog video output
+		 */
+		[RCAR_DU_OUTPUT_DPAD0] = {
+			.possible_crtcs = BIT(0),
+			.port = 0,
+		},
+		[RCAR_DU_OUTPUT_DPAD1] = {
+			.possible_crtcs = BIT(1),
+			.port = 1,
+		},
+		[RCAR_DU_OUTPUT_LVDS0] = {
+			.possible_crtcs = BIT(0) | BIT(1),
+			.port = 2,
+		},
+	},
+};
+
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_INTERLACED
@@ -341,7 +369,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
 
 static const struct of_device_id rcar_du_of_table[] = {
 	{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+	{ .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
 	{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
+	{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
 	{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
 	{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
 	{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -363,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
  * DRM operations
  */
 
-static void rcar_du_lastclose(struct drm_device *dev)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-
-	drm_fbdev_cma_restore_mode(rcdu->fbdev);
-}
-
 DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
 
 static struct drm_driver rcar_du_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
 				| DRIVER_ATOMIC,
-	.lastclose		= rcar_du_lastclose,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
@@ -404,32 +426,15 @@ static struct drm_driver rcar_du_driver = {
 static int rcar_du_pm_suspend(struct device *dev)
 {
 	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
-	struct drm_atomic_state *state;
 
-	drm_kms_helper_poll_disable(rcdu->ddev);
-	drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, true);
-
-	state = drm_atomic_helper_suspend(rcdu->ddev);
-	if (IS_ERR(state)) {
-		drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
-		drm_kms_helper_poll_enable(rcdu->ddev);
-		return PTR_ERR(state);
-	}
-
-	rcdu->suspend_state = state;
-
-	return 0;
+	return drm_mode_config_helper_suspend(rcdu->ddev);
 }
 
 static int rcar_du_pm_resume(struct device *dev)
 {
 	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
 
-	drm_atomic_helper_resume(rcdu->ddev, rcdu->suspend_state);
-	drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
-	drm_kms_helper_poll_enable(rcdu->ddev);
-
-	return 0;
+	return drm_mode_config_helper_resume(rcdu->ddev);
 }
 #endif
 
@@ -448,13 +453,10 @@ static int rcar_du_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(ddev);
 
-	if (rcdu->fbdev)
-		drm_fbdev_cma_fini(rcdu->fbdev);
-
 	drm_kms_helper_poll_fini(ddev);
 	drm_mode_config_cleanup(ddev);
 
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 
 	return 0;
 }
@@ -510,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev)
 
 	DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
 
+	drm_fbdev_generic_setup(ddev, 32);
+
 	return 0;
 
 error:
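
The open-coded suspend/resume sequence and the CMA fbdev plumbing both collapse into generic helpers here. Roughly what drm_mode_config_helper_suspend() does internally, as a sketch rather than the exact implementation:

	static int sketch_suspend(struct drm_device *dev)
	{
		struct drm_atomic_state *state;

		drm_kms_helper_poll_disable(dev);
		drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
		state = drm_atomic_helper_suspend(dev);
		if (IS_ERR(state)) {
			drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
			drm_kms_helper_poll_enable(dev);
			return PTR_ERR(state);
		}
		dev->mode_config.suspend_state = state;
		return 0;
	}

drm_mode_config_helper_resume() undoes the same steps using the stored state, which is why the driver no longer needs its own suspend_state field.
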
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 143c037..a68da79 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -20,7 +20,6 @@
 struct clk;
 struct device;
 struct drm_device;
-struct drm_fbdev_cma;
 struct rcar_du_device;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	BIT(0)	/* Per-CRTC IRQ and clock */
@@ -78,8 +77,6 @@ struct rcar_du_device {
 	void __iomem *mmio;
 
 	struct drm_device *ddev;
-	struct drm_fbdev_cma *fbdev;
-	struct drm_atomic_state *suspend_state;
 
 	struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
 	unsigned int num_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 4ebd61e..9c7007d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	return drm_gem_fb_create(dev, file_priv, mode_cmd);
 }
 
-static void rcar_du_output_poll_changed(struct drm_device *dev)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-
-	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
-}
-
 /* -----------------------------------------------------------------------------
  * Atomic Check and Update
  */
@@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
 
 static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
 	.fb_create = rcar_du_fb_create,
-	.output_poll_changed = rcar_du_output_poll_changed,
 	.atomic_check = rcar_du_atomic_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 
 	struct drm_device *dev = rcdu->ddev;
 	struct drm_encoder *encoder;
-	struct drm_fbdev_cma *fbdev;
 	unsigned int dpad0_sources;
 	unsigned int num_encoders;
 	unsigned int num_groups;
@@ -582,7 +573,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	 * Initialize vertical blanking interrupts handling. Start with vblank
 	 * disabled for all CRTCs.
 	 */
-	ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+	ret = drm_vblank_init(dev, rcdu->num_crtcs);
 	if (ret < 0)
 		return ret;
 
@@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 
 	drm_kms_helper_poll_init(dev);
 
-	if (dev->mode_config.num_connector) {
-		fbdev = drm_fbdev_cma_init(dev, 32,
-					   dev->mode_config.num_connector);
-		if (IS_ERR(fbdev))
-			return PTR_ERR(fbdev);
-
-		rcdu->fbdev = fbdev;
-	} else {
-		dev_info(rcdu->dev,
-			 "no connector found, disabling fbdev emulation\n");
-	}
-
 	return 0;
 }
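
The drm_vblank_init() change fixes an argument-kind mix-up: the function takes a CRTC count, while the old code passed a CRTC mask. With four CRTCs, (1 << 4) - 1 = 15 would over-allocate vblank state for eleven CRTCs that do not exist:

	ret = drm_vblank_init(dev, rcdu->num_crtcs);	/* a count, e.g. 4 */
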
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 9e07758..39d5ae3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 		drm_plane_helper_add(&plane->plane,
 				     &rcar_du_plane_helper_funcs);
 
+		drm_plane_create_alpha_property(&plane->plane);
+
 		if (type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
 
 		drm_object_attach_property(&plane->plane.base,
 					   rcdu->props.colorkey,
 					   RCAR_DU_COLORKEY_NONE);
-		drm_plane_create_alpha_property(&plane->plane);
 		drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
 	}
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 173d7ad..534a128 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
 	{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
 	{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
 	{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+	{ .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
 	{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
 	{ .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
 	{ .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26438d4..1e75196 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -7,7 +7,7 @@
 	select VIDEOMODE_HELPERS
 	select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
 	select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
-	select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+	select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
 	select DRM_RGB if ROCKCHIP_RGB
 	select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
 	help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 868263f..f6fc9d5d 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -11,7 +11,7 @@
 rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
 rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
 rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
-rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
 rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
 rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
 rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965..5a48548 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
 }
 
 static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
-				       u8 *buff, u8 buff_size)
+				       u8 *buff, u16 buff_size)
 {
 	u32 i;
 	int ret;
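
Widening buff_size from u8 to u16 is the "mailbox read size fix" called out in the merge summary: mailbox messages can exceed 255 bytes, and a u8 length parameter silently truncates. With a hypothetical 300-byte message:

	u16 len = 300;
	u8 truncated = (u8)len;	/* 44: the read stops 256 bytes short */
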
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
new file mode 100644
index 0000000..7ee359b
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ *      Chris Zhong <zyw@rock-chips.com>
+ *      Nickey Yang <nickey.yang@rock-chips.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <video/mipi_display.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DSI_PHY_RSTZ			0xa0
+#define PHY_DISFORCEPLL			0
+#define PHY_ENFORCEPLL			BIT(3)
+#define PHY_DISABLECLK			0
+#define PHY_ENABLECLK			BIT(2)
+#define PHY_RSTZ			0
+#define PHY_UNRSTZ			BIT(1)
+#define PHY_SHUTDOWNZ			0
+#define PHY_UNSHUTDOWNZ			BIT(0)
+
+#define DSI_PHY_IF_CFG			0xa4
+#define N_LANES(n)			((((n) - 1) & 0x3) << 0)
+#define PHY_STOP_WAIT_TIME(cycle)	(((cycle) & 0xff) << 8)
+
+#define DSI_PHY_STATUS			0xb0
+#define LOCK				BIT(0)
+#define STOP_STATE_CLK_LANE		BIT(2)
+
+#define DSI_PHY_TST_CTRL0		0xb4
+#define PHY_TESTCLK			BIT(1)
+#define PHY_UNTESTCLK			0
+#define PHY_TESTCLR			BIT(0)
+#define PHY_UNTESTCLR			0
+
+#define DSI_PHY_TST_CTRL1		0xb8
+#define PHY_TESTEN			BIT(16)
+#define PHY_UNTESTEN			0
+#define PHY_TESTDOUT(n)			(((n) & 0xff) << 8)
+#define PHY_TESTDIN(n)			(((n) & 0xff) << 0)
+
+#define DSI_INT_ST0			0xbc
+#define DSI_INT_ST1			0xc0
+#define DSI_INT_MSK0			0xc4
+#define DSI_INT_MSK1			0xc8
+
+#define PHY_STATUS_TIMEOUT_US		10000
+#define CMD_PKT_STATUS_TIMEOUT_US	20000
+
+#define BYPASS_VCO_RANGE	BIT(7)
+#define VCO_RANGE_CON_SEL(val)	(((val) & 0x7) << 3)
+#define VCO_IN_CAP_CON_DEFAULT	(0x0 << 1)
+#define VCO_IN_CAP_CON_LOW	(0x1 << 1)
+#define VCO_IN_CAP_CON_HIGH	(0x2 << 1)
+#define REF_BIAS_CUR_SEL	BIT(0)
+
+#define CP_CURRENT_3UA	0x1
+#define CP_CURRENT_4_5UA	0x2
+#define CP_CURRENT_7_5UA	0x6
+#define CP_CURRENT_6UA	0x9
+#define CP_CURRENT_12UA	0xb
+#define CP_CURRENT_SEL(val)	((val) & 0xf)
+#define CP_PROGRAM_EN		BIT(7)
+
+#define LPF_RESISTORS_15_5KOHM	0x1
+#define LPF_RESISTORS_13KOHM	0x2
+#define LPF_RESISTORS_11_5KOHM	0x4
+#define LPF_RESISTORS_10_5KOHM	0x8
+#define LPF_RESISTORS_8KOHM	0x10
+#define LPF_PROGRAM_EN		BIT(6)
+#define LPF_RESISTORS_SEL(val)	((val) & 0x3f)
+
+#define HSFREQRANGE_SEL(val)	(((val) & 0x3f) << 1)
+
+#define INPUT_DIVIDER(val)	(((val) - 1) & 0x7f)
+#define LOW_PROGRAM_EN		0
+#define HIGH_PROGRAM_EN		BIT(7)
+#define LOOP_DIV_LOW_SEL(val)	(((val) - 1) & 0x1f)
+#define LOOP_DIV_HIGH_SEL(val)	((((val) - 1) >> 5) & 0xf)
+#define PLL_LOOP_DIV_EN		BIT(5)
+#define PLL_INPUT_DIV_EN	BIT(4)
+
+#define POWER_CONTROL		BIT(6)
+#define INTERNAL_REG_CURRENT	BIT(3)
+#define BIAS_BLOCK_ON		BIT(2)
+#define BANDGAP_ON		BIT(0)
+
+#define TER_RESISTOR_HIGH	BIT(7)
+#define	TER_RESISTOR_LOW	0
+#define LEVEL_SHIFTERS_ON	BIT(6)
+#define TER_CAL_DONE		BIT(5)
+#define SETRD_MAX		(0x7 << 2)
+#define POWER_MANAGE		BIT(1)
+#define TER_RESISTORS_ON	BIT(0)
+
+#define BIASEXTR_SEL(val)	((val) & 0x7)
+#define BANDGAP_SEL(val)	((val) & 0x7)
+#define TLP_PROGRAM_EN		BIT(7)
+#define THS_PRE_PROGRAM_EN	BIT(7)
+#define THS_ZERO_PROGRAM_EN	BIT(6)
+
+#define PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL		0x10
+#define PLL_CP_CONTROL_PLL_LOCK_BYPASS			0x11
+#define PLL_LPF_AND_CP_CONTROL				0x12
+#define PLL_INPUT_DIVIDER_RATIO				0x17
+#define PLL_LOOP_DIVIDER_RATIO				0x18
+#define PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL	0x19
+#define BANDGAP_AND_BIAS_CONTROL			0x20
+#define TERMINATION_RESISTER_CONTROL			0x21
+#define AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY		0x22
+#define HS_RX_CONTROL_OF_LANE_0				0x44
+#define HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL	0x60
+#define HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL	0x61
+#define HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL	0x62
+#define HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL	0x63
+#define HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL	0x64
+#define HS_TX_CLOCK_LANE_POST_TIME_CONTROL		0x65
+#define HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL	0x70
+#define HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL	0x71
+#define HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL	0x72
+#define HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL	0x73
+#define HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL		0x74
+
+#define DW_MIPI_NEEDS_PHY_CFG_CLK	BIT(0)
+#define DW_MIPI_NEEDS_GRF_CLK		BIT(1)
+
+#define RK3288_GRF_SOC_CON6		0x025c
+#define RK3288_DSI0_LCDC_SEL		BIT(6)
+#define RK3288_DSI1_LCDC_SEL		BIT(9)
+
+#define RK3399_GRF_SOC_CON20		0x6250
+#define RK3399_DSI0_LCDC_SEL		BIT(0)
+#define RK3399_DSI1_LCDC_SEL		BIT(4)
+
+#define RK3399_GRF_SOC_CON22		0x6258
+#define RK3399_DSI0_TURNREQUEST		(0xf << 12)
+#define RK3399_DSI0_TURNDISABLE		(0xf << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE	(0xf << 4)
+#define RK3399_DSI0_FORCERXMODE		(0xf << 0)
+
+#define RK3399_GRF_SOC_CON23		0x625c
+#define RK3399_DSI1_TURNDISABLE		(0xf << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE	(0xf << 8)
+#define RK3399_DSI1_FORCERXMODE		(0xf << 4)
+#define RK3399_DSI1_ENABLE		(0xf << 0)
+
+#define RK3399_GRF_SOC_CON24		0x6260
+#define RK3399_TXRX_MASTERSLAVEZ	BIT(7)
+#define RK3399_TXRX_ENABLECLK		BIT(6)
+#define RK3399_TXRX_BASEDIR		BIT(5)
+
+#define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
+
+#define to_dsi(nm)	container_of(nm, struct dw_mipi_dsi_rockchip, nm)
+
+enum {
+	BANDGAP_97_07,
+	BANDGAP_98_05,
+	BANDGAP_99_02,
+	BANDGAP_100_00,
+	BANDGAP_93_17,
+	BANDGAP_94_15,
+	BANDGAP_95_12,
+	BANDGAP_96_10,
+};
+
+enum {
+	BIASEXTR_87_1,
+	BIASEXTR_91_5,
+	BIASEXTR_95_9,
+	BIASEXTR_100,
+	BIASEXTR_105_94,
+	BIASEXTR_111_88,
+	BIASEXTR_118_8,
+	BIASEXTR_127_7,
+};
+
+struct rockchip_dw_dsi_chip_data {
+	u32 reg;
+
+	u32 lcdsel_grf_reg;
+	u32 lcdsel_big;
+	u32 lcdsel_lit;
+
+	u32 enable_grf_reg;
+	u32 enable;
+
+	u32 lanecfg1_grf_reg;
+	u32 lanecfg1;
+	u32 lanecfg2_grf_reg;
+	u32 lanecfg2;
+
+	unsigned int flags;
+	unsigned int max_data_lanes;
+};
+
+struct dw_mipi_dsi_rockchip {
+	struct device *dev;
+	struct drm_encoder encoder;
+	void __iomem *base;
+
+	struct regmap *grf_regmap;
+	struct clk *pllref_clk;
+	struct clk *grf_clk;
+	struct clk *phy_cfg_clk;
+
+	/* dual-channel */
+	bool is_slave;
+	struct dw_mipi_dsi_rockchip *slave;
+
+	unsigned int lane_mbps; /* per lane */
+	u16 input_div;
+	u16 feedback_div;
+	u32 format;
+
+	struct dw_mipi_dsi *dmd;
+	const struct rockchip_dw_dsi_chip_data *cdata;
+	struct dw_mipi_dsi_plat_data pdata;
+	int devcnt;
+};
+
+struct dphy_pll_parameter_map {
+	unsigned int max_mbps;
+	u8 hsfreqrange;
+	u8 icpctrl;
+	u8 lpfctrl;
+};
+
+/* The table is based on a 27MHz DPHY PLL reference clock. */
+static const struct dphy_pll_parameter_map dppa_map[] = {
+	{  89, 0x00, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+	{  99, 0x10, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+	{ 109, 0x20, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+	{ 129, 0x01, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 139, 0x11, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 149, 0x21, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 169, 0x02, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+	{ 179, 0x12, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+	{ 199, 0x22, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+	{ 219, 0x03, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+	{ 239, 0x13, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+	{ 249, 0x23, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+	{ 269, 0x04, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+	{ 299, 0x14, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+	{ 329, 0x05, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 359, 0x15, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 399, 0x25, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+	{ 449, 0x06, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 499, 0x16, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 549, 0x07, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+	{ 599, 0x17, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+	{ 649, 0x08, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 699, 0x18, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 749, 0x09, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 799, 0x19, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 849, 0x29, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 899, 0x39, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+	{ 949, 0x0a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+	{ 999, 0x1a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+	{1049, 0x2a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+	{1099, 0x3a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+	{1149, 0x0b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1199, 0x1b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1249, 0x2b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1299, 0x3b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1349, 0x0c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1399, 0x1c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1449, 0x2c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+	{1500, 0x3c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM }
+};
+
+static int max_mbps_to_parameter(unsigned int max_mbps)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dppa_map); i++)
+		if (dppa_map[i].max_mbps >= max_mbps)
+			return i;
+
+	return -EINVAL;
+}
+
+static inline void dsi_write(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 val)
+{
+	writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
+{
+	return readl(dsi->base + reg);
+}
+
+static inline void dsi_set(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 mask)
+{
+	dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
+}
+
+static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
+				   u32 mask, u32 val)
+{
+	dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
+}
+
+static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
+				  u8 test_code,
+				  u8 test_data)
+{
+	/*
+	 * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
+	 * is latched internally as the current test code. Test data is
+	 * programmed internally on the rising edge of TESTCLK.
+	 */
+	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+
+	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
+					  PHY_TESTDIN(test_code));
+
+	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
+
+	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
+					  PHY_TESTDIN(test_data));
+
+	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+}
+
+/**
+ * ns2bc - Nanoseconds to byte clock cycles
+ */
+static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+	return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
+}
+
+/**
+ * ns2ui - Nanoseconds to UI time periods
+ */
+static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+	return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
+}
+
+static int dw_mipi_dsi_phy_init(void *priv_data)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+	int ret, i, vco;
+
+	/*
+	 * Get the VCO range code from the frequency (lane_mbps):
+	 * vco	frequency table
+	 * 000 - between   80 and  200 MHz
+	 * 001 - between  200 and  300 MHz
+	 * 010 - between  300 and  500 MHz
+	 * 011 - between  500 and  700 MHz
+	 * 100 - between  700 and  900 MHz
+	 * 101 - between  900 and 1100 MHz
+	 * 110 - between 1100 and 1300 MHz
+	 * 111 - between 1300 and 1500 MHz
+	 */
+	vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
+
+	i = max_mbps_to_parameter(dsi->lane_mbps);
+	if (i < 0) {
+		DRM_DEV_ERROR(dsi->dev,
+			      "failed to get parameter for %dmbps clock\n",
+			      dsi->lane_mbps);
+		return i;
+	}
+
+	ret = clk_prepare_enable(dsi->phy_cfg_clk);
+	if (ret) {
+		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
+		return ret;
+	}
+
+	dw_mipi_dsi_phy_write(dsi, PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL,
+			      BYPASS_VCO_RANGE |
+			      VCO_RANGE_CON_SEL(vco) |
+			      VCO_IN_CAP_CON_LOW |
+			      REF_BIAS_CUR_SEL);
+
+	dw_mipi_dsi_phy_write(dsi, PLL_CP_CONTROL_PLL_LOCK_BYPASS,
+			      CP_CURRENT_SEL(dppa_map[i].icpctrl));
+	dw_mipi_dsi_phy_write(dsi, PLL_LPF_AND_CP_CONTROL,
+			      CP_PROGRAM_EN | LPF_PROGRAM_EN |
+			      LPF_RESISTORS_SEL(dppa_map[i].lpfctrl));
+
+	dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0,
+			      HSFREQRANGE_SEL(dppa_map[i].hsfreqrange));
+
+	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_DIVIDER_RATIO,
+			      INPUT_DIVIDER(dsi->input_div));
+	dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+			      LOOP_DIV_LOW_SEL(dsi->feedback_div) |
+			      LOW_PROGRAM_EN);
+	/*
+	 * We need to set PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL
+	 * immediately to make the configured LSB take effect, per IP
+	 * simulation and lab test results. Only then does the MIPI PHY
+	 * PLL run at the correct frequency.
+	 */
+	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+			      PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+	dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+			      LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
+			      HIGH_PROGRAM_EN);
+	dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+			      PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+
+	dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+			      LOW_PROGRAM_EN | BIASEXTR_SEL(BIASEXTR_127_7));
+	dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+			      HIGH_PROGRAM_EN | BANDGAP_SEL(BANDGAP_96_10));
+
+	dw_mipi_dsi_phy_write(dsi, BANDGAP_AND_BIAS_CONTROL,
+			      POWER_CONTROL | INTERNAL_REG_CURRENT |
+			      BIAS_BLOCK_ON | BANDGAP_ON);
+
+	dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+			      TER_RESISTOR_LOW | TER_CAL_DONE |
+			      SETRD_MAX | TER_RESISTORS_ON);
+	dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+			      TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
+			      SETRD_MAX | POWER_MANAGE |
+			      TER_RESISTORS_ON);
+
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL,
+			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL,
+			      THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL,
+			      THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL,
+			      THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL,
+			      BIT(5) | ns2bc(dsi, 100));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_POST_TIME_CONTROL,
+			      BIT(5) | (ns2bc(dsi, 60) + 7));
+
+	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL,
+			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL,
+			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 20));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL,
+			      THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL,
+			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
+	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL,
+			      BIT(5) | ns2bc(dsi, 100));
+
+	clk_disable_unprepare(dsi->phy_cfg_clk);
+
+	return ret;
+}
+
+static int
+dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+			  unsigned long mode_flags, u32 lanes, u32 format,
+			  unsigned int *lane_mbps)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+	int bpp;
+	unsigned long mpclk, tmp;
+	unsigned int target_mbps = 1000;
+	unsigned int max_mbps = dppa_map[ARRAY_SIZE(dppa_map) - 1].max_mbps;
+	unsigned long best_freq = 0;
+	unsigned long fvco_min, fvco_max, fin, fout;
+	unsigned int min_prediv, max_prediv;
+	unsigned int _prediv, uninitialized_var(best_prediv);
+	unsigned long _fbdiv, uninitialized_var(best_fbdiv);
+	unsigned long min_delta = ULONG_MAX;
+
+	dsi->format = format;
+	bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+	if (bpp < 0) {
+		DRM_DEV_ERROR(dsi->dev,
+			      "failed to get bpp for pixel format %d\n",
+			      dsi->format);
+		return bpp;
+	}
+
+	mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
+	if (mpclk) {
+		/* scale by 1 / 0.8: Mbps must exceed the RGB bandwidth */
+		tmp = mpclk * (bpp / lanes) * 10 / 8;
+		if (tmp < max_mbps)
+			target_mbps = tmp;
+		else
+			DRM_DEV_ERROR(dsi->dev,
+				      "DPHY clock frequency is out of range\n");
+	}
+
+	fin = clk_get_rate(dsi->pllref_clk);
+	fout = target_mbps * USEC_PER_SEC;
+
+	/* constraint: 5MHz <= Fref / N <= 40MHz */
+	min_prediv = DIV_ROUND_UP(fin, 40 * USEC_PER_SEC);
+	max_prediv = fin / (5 * USEC_PER_SEC);
+
+	/* constraint: 80MHz <= Fvco <= 1500MHz */
+	fvco_min = 80 * USEC_PER_SEC;
+	fvco_max = 1500 * USEC_PER_SEC;
+
+	for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+		u64 tmp;
+		u32 delta;
+		/* Fvco = Fref * M / N */
+		tmp = (u64)fout * _prediv;
+		do_div(tmp, fin);
+		_fbdiv = tmp;
+		/*
+		 * Due to the use of a "by 2 pre-scaler", the feedback
+		 * multiplier M is limited to even values, and M must be
+		 * greater than 6 and no larger than 512.
+		 */
+		if (_fbdiv < 6 || _fbdiv > 512)
+			continue;
+
+		_fbdiv += _fbdiv % 2;
+
+		tmp = (u64)_fbdiv * fin;
+		do_div(tmp, _prediv);
+		if (tmp < fvco_min || tmp > fvco_max)
+			continue;
+
+		delta = abs(fout - tmp);
+		if (delta < min_delta) {
+			best_prediv = _prediv;
+			best_fbdiv = _fbdiv;
+			min_delta = delta;
+			best_freq = tmp;
+		}
+	}
+
+	if (best_freq) {
+		dsi->lane_mbps = DIV_ROUND_UP(best_freq, USEC_PER_SEC);
+		*lane_mbps = dsi->lane_mbps;
+		dsi->input_div = best_prediv;
+		dsi->feedback_div = best_fbdiv;
+	} else {
+		DRM_DEV_ERROR(dsi->dev, "Can not find best_freq for DPHY\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
+	.init = dw_mipi_dsi_phy_init,
+	.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+};
+
+static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
+					int mux)
+{
+	if (dsi->cdata->lcdsel_grf_reg)
+		regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+			mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+
+	if (dsi->cdata->lanecfg1_grf_reg)
+		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
+					      dsi->cdata->lanecfg1);
+
+	if (dsi->cdata->lanecfg2_grf_reg)
+		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
+					      dsi->cdata->lanecfg2);
+
+	if (dsi->cdata->enable_grf_reg)
+		regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
+					      dsi->cdata->enable);
+}
+
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		s->output_mode = ROCKCHIP_OUT_MODE_P888;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		s->output_mode = ROCKCHIP_OUT_MODE_P666;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		s->output_mode = ROCKCHIP_OUT_MODE_P565;
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	s->output_type = DRM_MODE_CONNECTOR_DSI;
+	if (dsi->slave)
+		s->output_flags = ROCKCHIP_OUTPUT_DSI_DUAL;
+
+	return 0;
+}
+
+static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+	int ret, mux;
+
+	mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
+						&dsi->encoder);
+	if (mux < 0)
+		return;
+
+	pm_runtime_get_sync(dsi->dev);
+	if (dsi->slave)
+		pm_runtime_get_sync(dsi->slave->dev);
+
+	/*
+	 * On RK3399 the grf clock must be enabled before writing a grf
+	 * register. On RK3288 and other SoCs grf_clk is NULL, and
+	 * clk_prepare_enable() of a NULL clock succeeds immediately.
+	 */
+	ret = clk_prepare_enable(dsi->grf_clk);
+	if (ret) {
+		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+		return;
+	}
+
+	dw_mipi_dsi_rockchip_config(dsi, mux);
+	if (dsi->slave)
+		dw_mipi_dsi_rockchip_config(dsi->slave, mux);
+
+	clk_disable_unprepare(dsi->grf_clk);
+}
+
+static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+	if (dsi->slave)
+		pm_runtime_put(dsi->slave->dev);
+	pm_runtime_put(dsi->dev);
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi_encoder_helper_funcs = {
+	.atomic_check = dw_mipi_dsi_encoder_atomic_check,
+	.enable = dw_mipi_dsi_encoder_enable,
+	.disable = dw_mipi_dsi_encoder_disable,
+};
+
+static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
+					   struct drm_device *drm_dev)
+{
+	struct drm_encoder *encoder = &dsi->encoder;
+	int ret;
+
+	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+							     dsi->dev->of_node);
+
+	ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize encoder with drm\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &dw_mipi_dsi_encoder_helper_funcs);
+
+	return 0;
+}
+
+static struct device
+*dw_mipi_dsi_rockchip_find_second(struct dw_mipi_dsi_rockchip *dsi)
+{
+	const struct of_device_id *match;
+	struct device_node *node = NULL, *local;
+
+	match = of_match_device(dsi->dev->driver->of_match_table, dsi->dev);
+
+	local = of_graph_get_remote_node(dsi->dev->of_node, 1, 0);
+	if (!local)
+		return NULL;
+
+	while ((node = of_find_compatible_node(node, NULL,
+					       match->compatible))) {
+		struct device_node *remote;
+
+		/* found ourselves */
+		if (node == dsi->dev->of_node)
+			continue;
+
+		remote = of_graph_get_remote_node(node, 1, 0);
+		if (!remote)
+			continue;
+
+		/* same display device in port1-ep0 for both */
+		if (remote == local) {
+			struct dw_mipi_dsi_rockchip *dsi2;
+			struct platform_device *pdev;
+
+			pdev = of_find_device_by_node(node);
+
+			/*
+			 * We have found the second DSI instance, so we either
+			 * return it or return an error. Either way we no
+			 * longer need the nodes, nor do we continue the loop.
+			 */
+			of_node_put(remote);
+			of_node_put(node);
+			of_node_put(local);
+
+			if (!pdev)
+				return ERR_PTR(-EPROBE_DEFER);
+
+			dsi2 = platform_get_drvdata(pdev);
+			if (!dsi2) {
+				platform_device_put(pdev);
+				return ERR_PTR(-EPROBE_DEFER);
+			}
+
+			return &pdev->dev;
+		}
+
+		of_node_put(remote);
+	}
+
+	of_node_put(local);
+
+	return NULL;
+}
+
+static int dw_mipi_dsi_rockchip_bind(struct device *dev,
+				     struct device *master,
+				     void *data)
+{
+	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct device *second;
+	bool master1, master2;
+	int ret;
+
+	second = dw_mipi_dsi_rockchip_find_second(dsi);
+	if (IS_ERR(second))
+		return PTR_ERR(second);
+
+	if (second) {
+		master1 = of_property_read_bool(dsi->dev->of_node,
+						"clock-master");
+		master2 = of_property_read_bool(second->of_node,
+						"clock-master");
+
+		if (master1 && master2) {
+			DRM_DEV_ERROR(dsi->dev, "only one clock-master allowed\n");
+			return -EINVAL;
+		}
+
+		if (!master1 && !master2) {
+			DRM_DEV_ERROR(dsi->dev, "no clock-master defined\n");
+			return -EINVAL;
+		}
+
+		/* we are the slave in dual-DSI */
+		if (!master1) {
+			dsi->is_slave = true;
+			return 0;
+		}
+
+		dsi->slave = dev_get_drvdata(second);
+		if (!dsi->slave) {
+			DRM_DEV_ERROR(dev, "could not get slaves data\n");
+			return -ENODEV;
+		}
+
+		dsi->slave->is_slave = true;
+		dw_mipi_dsi_set_slave(dsi->dmd, dsi->slave->dmd);
+		put_device(second);
+	}
+
+	ret = clk_prepare_enable(dsi->pllref_clk);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
+		return ret;
+	}
+
+	ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
+		return ret;
+	}
+
+	ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
+					struct device *master,
+					void *data)
+{
+	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+
+	if (dsi->is_slave)
+		return;
+
+	dw_mipi_dsi_unbind(dsi->dmd);
+
+	clk_disable_unprepare(dsi->pllref_clk);
+}
+
+static const struct component_ops dw_mipi_dsi_rockchip_ops = {
+	.bind	= dw_mipi_dsi_rockchip_bind,
+	.unbind	= dw_mipi_dsi_rockchip_unbind,
+};
+
+static int dw_mipi_dsi_rockchip_host_attach(void *priv_data,
+					    struct mipi_dsi_device *device)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+	struct device *second;
+	int ret;
+
+	ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+	if (ret) {
+		DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
+					ret);
+		return ret;
+	}
+
+	second = dw_mipi_dsi_rockchip_find_second(dsi);
+	if (IS_ERR(second))
+		return PTR_ERR(second);
+	if (second) {
+		ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
+		if (ret) {
+			DRM_DEV_ERROR(second,
+				      "Failed to register component: %d\n",
+				      ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
+					    struct mipi_dsi_device *device)
+{
+	struct dw_mipi_dsi_rockchip *dsi = priv_data;
+	struct device *second;
+
+	second = dw_mipi_dsi_rockchip_find_second(dsi);
+	if (second && !IS_ERR(second))
+		component_del(second, &dw_mipi_dsi_rockchip_ops);
+
+	component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+	return 0;
+}
+
+static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = {
+	.attach = dw_mipi_dsi_rockchip_host_attach,
+	.detach = dw_mipi_dsi_rockchip_host_detach,
+};
+
+static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct dw_mipi_dsi_rockchip *dsi;
+	struct resource *res;
+	const struct rockchip_dw_dsi_chip_data *cdata =
+				of_device_get_match_data(dev);
+	int ret, i;
+
+	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dsi->base)) {
+		DRM_DEV_ERROR(dev, "Unable to get dsi registers\n");
+		return PTR_ERR(dsi->base);
+	}
+
+	i = 0;
+	while (cdata[i].reg) {
+		if (cdata[i].reg == res->start) {
+			dsi->cdata = &cdata[i];
+			break;
+		}
+
+		i++;
+	}
+
+	if (!dsi->cdata) {
+		dev_err(dev, "no dsi-config for %s node\n", np->name);
+		return -EINVAL;
+	}
+
+	dsi->pllref_clk = devm_clk_get(dev, "ref");
+	if (IS_ERR(dsi->pllref_clk)) {
+		ret = PTR_ERR(dsi->pllref_clk);
+		DRM_DEV_ERROR(dev,
+			      "Unable to get pll reference clock: %d\n", ret);
+		return ret;
+	}
+
+	if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
+		dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
+		if (IS_ERR(dsi->phy_cfg_clk)) {
+			ret = PTR_ERR(dsi->phy_cfg_clk);
+			DRM_DEV_ERROR(dev,
+				      "Unable to get phy_cfg_clk: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (dsi->cdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
+		dsi->grf_clk = devm_clk_get(dev, "grf");
+		if (IS_ERR(dsi->grf_clk)) {
+			ret = PTR_ERR(dsi->grf_clk);
+			DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
+			return ret;
+		}
+	}
+
+	dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	if (IS_ERR(dsi->grf_regmap)) {
+		DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+		return PTR_ERR(dsi->grf_regmap);
+	}
+
+	dsi->dev = dev;
+	dsi->pdata.base = dsi->base;
+	dsi->pdata.max_data_lanes = dsi->cdata->max_data_lanes;
+	dsi->pdata.phy_ops = &dw_mipi_dsi_rockchip_phy_ops;
+	dsi->pdata.host_ops = &dw_mipi_dsi_rockchip_host_ops;
+	dsi->pdata.priv_data = dsi;
+	platform_set_drvdata(pdev, dsi);
+
+	dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata);
+	if (IS_ERR(dsi->dmd)) {
+		ret = PTR_ERR(dsi->dmd);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to probe dw_mipi_dsi: %d\n", ret);
+		goto err_clkdisable;
+	}
+
+	return 0;
+
+err_clkdisable:
+	clk_disable_unprepare(dsi->pllref_clk);
+	return ret;
+}
+
+static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+{
+	struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
+
+	if (dsi->devcnt == 0)
+		component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+	dw_mipi_dsi_remove(dsi->dmd);
+
+	return 0;
+}
+
+static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
+	{
+		.reg = 0xff960000,
+		.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+		.lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
+		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+
+		.max_data_lanes = 4,
+	},
+	{
+		.reg = 0xff964000,
+		.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+		.lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
+		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+
+		.max_data_lanes = 4,
+	},
+	{ /* sentinel */ }
+};
+
+static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
+	{
+		.reg = 0xff960000,
+		.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+		.lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
+		.lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
+					    RK3399_DSI0_LCDC_SEL),
+
+		.lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
+		.lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
+					     RK3399_DSI0_TURNDISABLE |
+					     RK3399_DSI0_FORCETXSTOPMODE |
+					     RK3399_DSI0_FORCERXMODE),
+
+		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+		.max_data_lanes = 4,
+	},
+	{
+		.reg = 0xff968000,
+		.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+		.lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
+		.lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
+					    RK3399_DSI1_LCDC_SEL),
+
+		.lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
+		.lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
+					     RK3399_DSI1_FORCETXSTOPMODE |
+					     RK3399_DSI1_FORCERXMODE |
+					     RK3399_DSI1_ENABLE),
+
+		.lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
+		.lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
+					  RK3399_TXRX_ENABLECLK,
+					  RK3399_TXRX_MASTERSLAVEZ |
+					  RK3399_TXRX_ENABLECLK |
+					  RK3399_TXRX_BASEDIR),
+
+		.enable_grf_reg = RK3399_GRF_SOC_CON23,
+		.enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+
+		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+		.max_data_lanes = 4,
+	},
+	{ /* sentinel */ }
+};
+
+static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
+	{
+	 .compatible = "rockchip,rk3288-mipi-dsi",
+	 .data = &rk3288_chip_data,
+	}, {
+	 .compatible = "rockchip,rk3399-mipi-dsi",
+	 .data = &rk3399_chip_data,
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi_rockchip_driver = {
+	.probe		= dw_mipi_dsi_rockchip_probe,
+	.remove		= dw_mipi_dsi_rockchip_remove,
+	.driver		= {
+		.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+		.name	= "dw-mipi-dsi-rockchip",
+	},
+};
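
The get_lane_mbps() divider search in the new file above is a standard closest-ratio scan over the pre-divider. A self-contained sketch of the same technique, stripped of the DPHY-specific constraints (all names and bounds here are illustrative, not the driver's):

	#include <stdint.h>

	/* Find integer dividers n, m minimizing |fout - fin * m / n|. */
	static void pll_search(uint64_t fin, uint64_t fout,
			       unsigned int n_min, unsigned int n_max,
			       unsigned int *best_n, unsigned int *best_m)
	{
		uint64_t min_delta = UINT64_MAX;
		unsigned int n;

		for (n = n_min; n <= n_max; n++) {
			/* Round the feedback divider to the nearest integer. */
			uint64_t m = (fout * n + fin / 2) / fin;
			uint64_t f = fin * m / n;
			uint64_t delta = f > fout ? f - fout : fout - f;

			if (m && delta < min_delta) {
				min_delta = delta;
				*best_n = n;
				*best_m = (unsigned int)m;
			}
		}
	}

The driver's version additionally clamps m to even values in [6, 512] and rejects VCO frequencies outside the PLL's range, which is why it iterates rather than computing the ratio directly.
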
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
deleted file mode 100644
index 662b6cb..0000000
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-/*
- * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/iopoll.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/mfd/syscon.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drmP.h>
-#include <video/mipi_display.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
-
-#define DRIVER_NAME    "dw-mipi-dsi"
-
-#define RK3288_GRF_SOC_CON6		0x025c
-#define RK3288_DSI0_SEL_VOP_LIT		BIT(6)
-#define RK3288_DSI1_SEL_VOP_LIT		BIT(9)
-
-#define RK3399_GRF_SOC_CON20		0x6250
-#define RK3399_DSI0_SEL_VOP_LIT		BIT(0)
-#define RK3399_DSI1_SEL_VOP_LIT		BIT(4)
-
-/* disable turnrequest, turndisable, forcetxstopmode, forcerxmode */
-#define RK3399_GRF_SOC_CON22		0x6258
-#define RK3399_GRF_DSI_MODE		0xffff0000
-
-#define DSI_VERSION			0x00
-#define DSI_PWR_UP			0x04
-#define RESET				0
-#define POWERUP				BIT(0)
-
-#define DSI_CLKMGR_CFG			0x08
-#define TO_CLK_DIVIDSION(div)		(((div) & 0xff) << 8)
-#define TX_ESC_CLK_DIVIDSION(div)	(((div) & 0xff) << 0)
-
-#define DSI_DPI_VCID			0x0c
-#define DPI_VID(vid)			(((vid) & 0x3) << 0)
-
-#define DSI_DPI_COLOR_CODING		0x10
-#define EN18_LOOSELY			BIT(8)
-#define DPI_COLOR_CODING_16BIT_1	0x0
-#define DPI_COLOR_CODING_16BIT_2	0x1
-#define DPI_COLOR_CODING_16BIT_3	0x2
-#define DPI_COLOR_CODING_18BIT_1	0x3
-#define DPI_COLOR_CODING_18BIT_2	0x4
-#define DPI_COLOR_CODING_24BIT		0x5
-
-#define DSI_DPI_CFG_POL			0x14
-#define COLORM_ACTIVE_LOW		BIT(4)
-#define SHUTD_ACTIVE_LOW		BIT(3)
-#define HSYNC_ACTIVE_LOW		BIT(2)
-#define VSYNC_ACTIVE_LOW		BIT(1)
-#define DATAEN_ACTIVE_LOW		BIT(0)
-
-#define DSI_DPI_LP_CMD_TIM		0x18
-#define OUTVACT_LPCMD_TIME(p)		(((p) & 0xff) << 16)
-#define INVACT_LPCMD_TIME(p)		((p) & 0xff)
-
-#define DSI_DBI_CFG			0x20
-#define DSI_DBI_CMDSIZE			0x28
-
-#define DSI_PCKHDL_CFG			0x2c
-#define EN_CRC_RX			BIT(4)
-#define EN_ECC_RX			BIT(3)
-#define EN_BTA				BIT(2)
-#define EN_EOTP_RX			BIT(1)
-#define EN_EOTP_TX			BIT(0)
-
-#define DSI_MODE_CFG			0x34
-#define ENABLE_VIDEO_MODE		0
-#define ENABLE_CMD_MODE			BIT(0)
-
-#define DSI_VID_MODE_CFG		0x38
-#define FRAME_BTA_ACK			BIT(14)
-#define ENABLE_LOW_POWER		(0x3f << 8)
-#define ENABLE_LOW_POWER_MASK		(0x3f << 8)
-#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES	0x0
-#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS	0x1
-#define VID_MODE_TYPE_BURST			0x2
-#define VID_MODE_TYPE_MASK			0x3
-
-#define DSI_VID_PKT_SIZE		0x3c
-#define VID_PKT_SIZE(p)			(((p) & 0x3fff) << 0)
-#define VID_PKT_MAX_SIZE		0x3fff
-
-#define DSI_VID_HSA_TIME		0x48
-#define DSI_VID_HBP_TIME		0x4c
-#define DSI_VID_HLINE_TIME		0x50
-#define DSI_VID_VSA_LINES		0x54
-#define DSI_VID_VBP_LINES		0x58
-#define DSI_VID_VFP_LINES		0x5c
-#define DSI_VID_VACTIVE_LINES		0x60
-#define DSI_CMD_MODE_CFG		0x68
-#define MAX_RD_PKT_SIZE_LP		BIT(24)
-#define DCS_LW_TX_LP			BIT(19)
-#define DCS_SR_0P_TX_LP			BIT(18)
-#define DCS_SW_1P_TX_LP			BIT(17)
-#define DCS_SW_0P_TX_LP			BIT(16)
-#define GEN_LW_TX_LP			BIT(14)
-#define GEN_SR_2P_TX_LP			BIT(13)
-#define GEN_SR_1P_TX_LP			BIT(12)
-#define GEN_SR_0P_TX_LP			BIT(11)
-#define GEN_SW_2P_TX_LP			BIT(10)
-#define GEN_SW_1P_TX_LP			BIT(9)
-#define GEN_SW_0P_TX_LP			BIT(8)
-#define EN_ACK_RQST			BIT(1)
-#define EN_TEAR_FX			BIT(0)
-
-#define CMD_MODE_ALL_LP			(MAX_RD_PKT_SIZE_LP | \
-					 DCS_LW_TX_LP | \
-					 DCS_SR_0P_TX_LP | \
-					 DCS_SW_1P_TX_LP | \
-					 DCS_SW_0P_TX_LP | \
-					 GEN_LW_TX_LP | \
-					 GEN_SR_2P_TX_LP | \
-					 GEN_SR_1P_TX_LP | \
-					 GEN_SR_0P_TX_LP | \
-					 GEN_SW_2P_TX_LP | \
-					 GEN_SW_1P_TX_LP | \
-					 GEN_SW_0P_TX_LP)
-
-#define DSI_GEN_HDR			0x6c
-#define GEN_HDATA(data)			(((data) & 0xffff) << 8)
-#define GEN_HDATA_MASK			(0xffff << 8)
-#define GEN_HTYPE(type)			(((type) & 0xff) << 0)
-#define GEN_HTYPE_MASK			0xff
-
-#define DSI_GEN_PLD_DATA		0x70
-
-#define DSI_CMD_PKT_STATUS		0x74
-#define GEN_CMD_EMPTY			BIT(0)
-#define GEN_CMD_FULL			BIT(1)
-#define GEN_PLD_W_EMPTY			BIT(2)
-#define GEN_PLD_W_FULL			BIT(3)
-#define GEN_PLD_R_EMPTY			BIT(4)
-#define GEN_PLD_R_FULL			BIT(5)
-#define GEN_RD_CMD_BUSY			BIT(6)
-
-#define DSI_TO_CNT_CFG			0x78
-#define HSTX_TO_CNT(p)			(((p) & 0xffff) << 16)
-#define LPRX_TO_CNT(p)			((p) & 0xffff)
-
-#define DSI_BTA_TO_CNT			0x8c
-#define DSI_LPCLK_CTRL			0x94
-#define AUTO_CLKLANE_CTRL		BIT(1)
-#define PHY_TXREQUESTCLKHS		BIT(0)
-
-#define DSI_PHY_TMR_LPCLK_CFG		0x98
-#define PHY_CLKHS2LP_TIME(lbcc)		(((lbcc) & 0x3ff) << 16)
-#define PHY_CLKLP2HS_TIME(lbcc)		((lbcc) & 0x3ff)
-
-#define DSI_PHY_TMR_CFG			0x9c
-#define PHY_HS2LP_TIME(lbcc)		(((lbcc) & 0xff) << 24)
-#define PHY_LP2HS_TIME(lbcc)		(((lbcc) & 0xff) << 16)
-#define MAX_RD_TIME(lbcc)		((lbcc) & 0x7fff)
-
-#define DSI_PHY_RSTZ			0xa0
-#define PHY_DISFORCEPLL			0
-#define PHY_ENFORCEPLL			BIT(3)
-#define PHY_DISABLECLK			0
-#define PHY_ENABLECLK			BIT(2)
-#define PHY_RSTZ			0
-#define PHY_UNRSTZ			BIT(1)
-#define PHY_SHUTDOWNZ			0
-#define PHY_UNSHUTDOWNZ			BIT(0)
-
-#define DSI_PHY_IF_CFG			0xa4
-#define N_LANES(n)			((((n) - 1) & 0x3) << 0)
-#define PHY_STOP_WAIT_TIME(cycle)	(((cycle) & 0xff) << 8)
-
-#define DSI_PHY_STATUS			0xb0
-#define LOCK				BIT(0)
-#define STOP_STATE_CLK_LANE		BIT(2)
-
-#define DSI_PHY_TST_CTRL0		0xb4
-#define PHY_TESTCLK			BIT(1)
-#define PHY_UNTESTCLK			0
-#define PHY_TESTCLR			BIT(0)
-#define PHY_UNTESTCLR			0
-
-#define DSI_PHY_TST_CTRL1		0xb8
-#define PHY_TESTEN			BIT(16)
-#define PHY_UNTESTEN			0
-#define PHY_TESTDOUT(n)			(((n) & 0xff) << 8)
-#define PHY_TESTDIN(n)			(((n) & 0xff) << 0)
-
-#define DSI_INT_ST0			0xbc
-#define DSI_INT_ST1			0xc0
-#define DSI_INT_MSK0			0xc4
-#define DSI_INT_MSK1			0xc8
-
-#define PHY_STATUS_TIMEOUT_US		10000
-#define CMD_PKT_STATUS_TIMEOUT_US	20000
-
-#define BYPASS_VCO_RANGE	BIT(7)
-#define VCO_RANGE_CON_SEL(val)	(((val) & 0x7) << 3)
-#define VCO_IN_CAP_CON_DEFAULT	(0x0 << 1)
-#define VCO_IN_CAP_CON_LOW	(0x1 << 1)
-#define VCO_IN_CAP_CON_HIGH	(0x2 << 1)
-#define REF_BIAS_CUR_SEL	BIT(0)
-
-#define CP_CURRENT_3MA		BIT(3)
-#define CP_PROGRAM_EN		BIT(7)
-#define LPF_PROGRAM_EN		BIT(6)
-#define LPF_RESISTORS_20_KOHM	0
-
-#define HSFREQRANGE_SEL(val)	(((val) & 0x3f) << 1)
-
-#define INPUT_DIVIDER(val)	(((val) - 1) & 0x7f)
-#define LOW_PROGRAM_EN		0
-#define HIGH_PROGRAM_EN		BIT(7)
-#define LOOP_DIV_LOW_SEL(val)	(((val) - 1) & 0x1f)
-#define LOOP_DIV_HIGH_SEL(val)	((((val) - 1) >> 5) & 0x1f)
-#define PLL_LOOP_DIV_EN		BIT(5)
-#define PLL_INPUT_DIV_EN	BIT(4)
-
-#define POWER_CONTROL		BIT(6)
-#define INTERNAL_REG_CURRENT	BIT(3)
-#define BIAS_BLOCK_ON		BIT(2)
-#define BANDGAP_ON		BIT(0)
-
-#define TER_RESISTOR_HIGH	BIT(7)
-#define	TER_RESISTOR_LOW	0
-#define LEVEL_SHIFTERS_ON	BIT(6)
-#define TER_CAL_DONE		BIT(5)
-#define SETRD_MAX		(0x7 << 2)
-#define POWER_MANAGE		BIT(1)
-#define TER_RESISTORS_ON	BIT(0)
-
-#define BIASEXTR_SEL(val)	((val) & 0x7)
-#define BANDGAP_SEL(val)	((val) & 0x7)
-#define TLP_PROGRAM_EN		BIT(7)
-#define THS_PRE_PROGRAM_EN	BIT(7)
-#define THS_ZERO_PROGRAM_EN	BIT(6)
-
-#define DW_MIPI_NEEDS_PHY_CFG_CLK	BIT(0)
-#define DW_MIPI_NEEDS_GRF_CLK		BIT(1)
-
-enum {
-	BANDGAP_97_07,
-	BANDGAP_98_05,
-	BANDGAP_99_02,
-	BANDGAP_100_00,
-	BANDGAP_93_17,
-	BANDGAP_94_15,
-	BANDGAP_95_12,
-	BANDGAP_96_10,
-};
-
-enum {
-	BIASEXTR_87_1,
-	BIASEXTR_91_5,
-	BIASEXTR_95_9,
-	BIASEXTR_100,
-	BIASEXTR_105_94,
-	BIASEXTR_111_88,
-	BIASEXTR_118_8,
-	BIASEXTR_127_7,
-};
-
-struct dw_mipi_dsi_plat_data {
-	u32 dsi0_en_bit;
-	u32 dsi1_en_bit;
-	u32 grf_switch_reg;
-	u32 grf_dsi0_mode;
-	u32 grf_dsi0_mode_reg;
-	unsigned int flags;
-	unsigned int max_data_lanes;
-};
-
-struct dw_mipi_dsi {
-	struct drm_encoder encoder;
-	struct drm_connector connector;
-	struct mipi_dsi_host dsi_host;
-	struct drm_panel *panel;
-	struct device *dev;
-	struct regmap *grf_regmap;
-	void __iomem *base;
-
-	struct clk *grf_clk;
-	struct clk *pllref_clk;
-	struct clk *pclk;
-	struct clk *phy_cfg_clk;
-
-	int dpms_mode;
-	unsigned int lane_mbps; /* per lane */
-	u32 channel;
-	u32 lanes;
-	u32 format;
-	u16 input_div;
-	u16 feedback_div;
-	unsigned long mode_flags;
-
-	const struct dw_mipi_dsi_plat_data *pdata;
-};
-
-enum dw_mipi_dsi_mode {
-	DW_MIPI_DSI_CMD_MODE,
-	DW_MIPI_DSI_VID_MODE,
-};
-
-struct dphy_pll_testdin_map {
-	unsigned int max_mbps;
-	u8 testdin;
-};
-
-/* The table is based on 27MHz DPHY pll reference clock. */
-static const struct dphy_pll_testdin_map dptdin_map[] = {
-	{  90, 0x00}, { 100, 0x10}, { 110, 0x20}, { 130, 0x01},
-	{ 140, 0x11}, { 150, 0x21}, { 170, 0x02}, { 180, 0x12},
-	{ 200, 0x22}, { 220, 0x03}, { 240, 0x13}, { 250, 0x23},
-	{ 270, 0x04}, { 300, 0x14}, { 330, 0x05}, { 360, 0x15},
-	{ 400, 0x25}, { 450, 0x06}, { 500, 0x16}, { 550, 0x07},
-	{ 600, 0x17}, { 650, 0x08}, { 700, 0x18}, { 750, 0x09},
-	{ 800, 0x19}, { 850, 0x29}, { 900, 0x39}, { 950, 0x0a},
-	{1000, 0x1a}, {1050, 0x2a}, {1100, 0x3a}, {1150, 0x0b},
-	{1200, 0x1b}, {1250, 0x2b}, {1300, 0x3b}, {1350, 0x0c},
-	{1400, 0x1c}, {1450, 0x2c}, {1500, 0x3c}
-};
-
-static int max_mbps_to_testdin(unsigned int max_mbps)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(dptdin_map); i++)
-		if (dptdin_map[i].max_mbps > max_mbps)
-			return dptdin_map[i].testdin;
-
-	return -EINVAL;
-}
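For reference, the bucket selection above can be exercised in isolation; a minimal standalone sketch (the trimmed table is a subset of dptdin_map, and the main() harness is purely illustrative). Note the strict `>` means a rate sitting exactly on a bucket boundary rolls over to the next bucket, as in the driver:

#include <stdio.h>

/* Trimmed subset of dptdin_map, for illustration only */
static const struct {
	unsigned int max_mbps;
	unsigned char testdin;
} map[] = {
	{  90, 0x00}, { 100, 0x10}, { 400, 0x25}, { 450, 0x06},
	{ 500, 0x16}, {1000, 0x1a}, {1500, 0x3c},
};

int main(void)
{
	unsigned int rate = 450;	/* Mbps per lane */
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].max_mbps > rate) {
			/* 450 Mbps selects the 500 Mbps bucket: 0x16 */
			printf("testdin 0x%02x\n", map[i].testdin);
			return 0;
		}
	}
	return 1;	/* out of range; the driver returns -EINVAL */
}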
-
-/*
- * The controller should generate 2 frames before
- * preparing the peripheral.
- */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
-{
-	int refresh, two_frames;
-
-	refresh = drm_mode_vrefresh(mode);
-	two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
-	msleep(two_frames);
-}
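Concretely, at a 60 Hz refresh rate the helper sleeps DIV_ROUND_UP(1000, 60) * 2 = 34 ms; a trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int refresh = 60;	/* Hz, illustrative */
	int two_frames = (1000 + refresh - 1) / refresh * 2;	/* DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2 */

	printf("%d ms\n", two_frames);	/* 34 ms at 60 Hz */
	return 0;
}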
-
-static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
-{
-	return container_of(host, struct dw_mipi_dsi, dsi_host);
-}
-
-static inline struct dw_mipi_dsi *con_to_dsi(struct drm_connector *con)
-{
-	return container_of(con, struct dw_mipi_dsi, connector);
-}
-
-static inline struct dw_mipi_dsi *encoder_to_dsi(struct drm_encoder *encoder)
-{
-	return container_of(encoder, struct dw_mipi_dsi, encoder);
-}
-
-static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
-{
-	writel(val, dsi->base + reg);
-}
-
-static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
-{
-	return readl(dsi->base + reg);
-}
-
-static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi *dsi, u8 test_code,
-				  u8 test_data)
-{
-	/*
-	 * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
-	 * is latched internally as the current test code. Test data is
-	 * programmed internally by rising edge on TESTCLK.
-	 */
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-
-	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
-					  PHY_TESTDIN(test_code));
-
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
-
-	dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
-					  PHY_TESTDIN(test_data));
-
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-}
-
-/**
- * ns2bc - Nanoseconds to byte clock cycles
- */
-static inline unsigned int ns2bc(struct dw_mipi_dsi *dsi, int ns)
-{
-	return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
-}
-
-/**
- * ns2ui - Nanoseconds to UI time periods
- */
-static inline unsigned int ns2ui(struct dw_mipi_dsi *dsi, int ns)
-{
-	return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
-}
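Both conversions round up so the programmed PHY timing never comes out short. As a worked example, at lane_mbps = 1000 the byte clock runs at 125 MHz (8 ns period), so 500 ns is 63 byte-clock cycles and 40 ns is 40 UI; a standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int lane_mbps = 1000;	/* illustrative */

	/* ns2bc(500): DIV_ROUND_UP(500 * 1000 / 8, 1000) = 63 */
	printf("%u byte clocks\n", (500 * lane_mbps / 8 + 999) / 1000);
	/* ns2ui(40):  DIV_ROUND_UP(40 * 1000, 1000) = 40 */
	printf("%u UI\n", (40 * lane_mbps + 999) / 1000);
	return 0;
}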
-
-static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
-{
-	int ret, testdin, vco, val;
-
-	vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
-
-	testdin = max_mbps_to_testdin(dsi->lane_mbps);
-	if (testdin < 0) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "failed to get testdin for %dmbps lane clock\n",
-			      dsi->lane_mbps);
-		return testdin;
-	}
-
-	/* Start by clearing PHY state */
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
-	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
-
-	ret = clk_prepare_enable(dsi->phy_cfg_clk);
-	if (ret) {
-		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
-		return ret;
-	}
-
-	dw_mipi_dsi_phy_write(dsi, 0x10, BYPASS_VCO_RANGE |
-					 VCO_RANGE_CON_SEL(vco) |
-					 VCO_IN_CAP_CON_LOW |
-					 REF_BIAS_CUR_SEL);
-
-	dw_mipi_dsi_phy_write(dsi, 0x11, CP_CURRENT_3MA);
-	dw_mipi_dsi_phy_write(dsi, 0x12, CP_PROGRAM_EN | LPF_PROGRAM_EN |
-					 LPF_RESISTORS_20_KOHM);
-
-	dw_mipi_dsi_phy_write(dsi, 0x44, HSFREQRANGE_SEL(testdin));
-
-	dw_mipi_dsi_phy_write(dsi, 0x17, INPUT_DIVIDER(dsi->input_div));
-	dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_LOW_SEL(dsi->feedback_div) |
-					 LOW_PROGRAM_EN);
-	dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
-					 HIGH_PROGRAM_EN);
-	dw_mipi_dsi_phy_write(dsi, 0x19, PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
-
-	dw_mipi_dsi_phy_write(dsi, 0x22, LOW_PROGRAM_EN |
-					 BIASEXTR_SEL(BIASEXTR_127_7));
-	dw_mipi_dsi_phy_write(dsi, 0x22, HIGH_PROGRAM_EN |
-					 BANDGAP_SEL(BANDGAP_96_10));
-
-	dw_mipi_dsi_phy_write(dsi, 0x20, POWER_CONTROL | INTERNAL_REG_CURRENT |
-					 BIAS_BLOCK_ON | BANDGAP_ON);
-
-	dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_LOW | TER_CAL_DONE |
-					 SETRD_MAX | TER_RESISTORS_ON);
-	dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
-					 SETRD_MAX | POWER_MANAGE |
-					 TER_RESISTORS_ON);
-
-	dw_mipi_dsi_phy_write(dsi, 0x60, TLP_PROGRAM_EN | ns2bc(dsi, 500));
-	dw_mipi_dsi_phy_write(dsi, 0x61, THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
-	dw_mipi_dsi_phy_write(dsi, 0x62, THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
-	dw_mipi_dsi_phy_write(dsi, 0x63, THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
-	dw_mipi_dsi_phy_write(dsi, 0x64, BIT(5) | ns2bc(dsi, 100));
-	dw_mipi_dsi_phy_write(dsi, 0x65, BIT(5) | (ns2bc(dsi, 60) + 7));
-
-	dw_mipi_dsi_phy_write(dsi, 0x70, TLP_PROGRAM_EN | ns2bc(dsi, 500));
-	dw_mipi_dsi_phy_write(dsi, 0x71,
-			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 5));
-	dw_mipi_dsi_phy_write(dsi, 0x72,
-			      THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
-	dw_mipi_dsi_phy_write(dsi, 0x73,
-			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
-	dw_mipi_dsi_phy_write(dsi, 0x74, BIT(5) | ns2bc(dsi, 100));
-
-	dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
-				     PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
-
-	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
-				 val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
-		goto phy_init_end;
-	}
-
-	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
-				 val, val & STOP_STATE_CLK_LANE, 1000,
-				 PHY_STATUS_TIMEOUT_US);
-	if (ret < 0)
-		DRM_DEV_ERROR(dsi->dev,
-			      "failed to wait for phy clk lane stop state\n");
-
-phy_init_end:
-	clk_disable_unprepare(dsi->phy_cfg_clk);
-
-	return ret;
-}
-
-static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
-				    struct drm_display_mode *mode)
-{
-	unsigned int i, pre;
-	unsigned long mpclk, pllref, tmp;
-	unsigned int m = 1, n = 1, target_mbps = 1000;
-	unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
-	int bpp;
-
-	bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-	if (bpp < 0) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "failed to get bpp for pixel format %d\n",
-			      dsi->format);
-		return bpp;
-	}
-
-	mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
-	if (mpclk) {
-		/* take 1 / 0.8, since the lane rate in Mbps must exceed the RGB bandwidth */
-		tmp = mpclk * (bpp / dsi->lanes) * 10 / 8;
-		if (tmp < max_mbps)
-			target_mbps = tmp;
-		else
-			DRM_DEV_ERROR(dsi->dev,
-				      "DPHY clock frequency is out of range\n");
-	}
-
-	pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
-	tmp = pllref;
-
-	/*
-	 * The limits on the PLL divisor are:
-	 *
-	 *	5MHz <= (pllref / n) <= 40MHz
-	 *
-	 * we walk over these values in decreasing order so that if we hit
-	 * an exact match for target_mbps it is more likely that "m" will be
-	 * even.
-	 *
-	 * TODO: ensure that "m" is even after this loop.
-	 */
-	for (i = pllref / 5; i > (pllref / 40); i--) {
-		pre = pllref / i;
-		if ((tmp > (target_mbps % pre)) && (target_mbps / pre < 512)) {
-			tmp = target_mbps % pre;
-			n = i;
-			m = target_mbps / pre;
-		}
-		if (tmp == 0)
-			break;
-	}
-
-	dsi->lane_mbps = pllref / n * m;
-	dsi->input_div = n;
-	dsi->feedback_div = m;
-
-	return 0;
-}
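As a worked example of the search (numbers chosen for illustration): with a 27 MHz reference and target_mbps = 900, the very first iteration hits a zero remainder, settling on n = 5, m = 180 and an exact 900 Mbps lane rate:

#include <stdio.h>

int main(void)
{
	unsigned long pllref = 27, target = 900, tmp = pllref;
	unsigned int n = 1, m = 1, i, pre;

	for (i = pllref / 5; i > pllref / 40; i--) {	/* i = 5, 4, ..., 1 */
		pre = pllref / i;
		if (tmp > target % pre && target / pre < 512) {
			tmp = target % pre;
			n = i;
			m = target / pre;
		}
		if (tmp == 0)
			break;		/* i = 5: pre = 5, 900 % 5 == 0 */
	}
	printf("n=%u m=%u lane_mbps=%lu\n", n, m, pllref / n * m);	/* n=5 m=180 900 */
	return 0;
}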
-
-static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
-				   struct mipi_dsi_device *device)
-{
-	struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
-	if (device->lanes > dsi->pdata->max_data_lanes) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "the number of data lanes(%u) is too many\n",
-			      device->lanes);
-		return -EINVAL;
-	}
-
-	dsi->lanes = device->lanes;
-	dsi->channel = device->channel;
-	dsi->format = device->format;
-	dsi->mode_flags = device->mode_flags;
-	dsi->panel = of_drm_find_panel(device->dev.of_node);
-	if (!IS_ERR(dsi->panel))
-		return drm_panel_attach(dsi->panel, &dsi->connector);
-
-	return -EINVAL;
-}
-
-static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
-				   struct mipi_dsi_device *device)
-{
-	struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
-	drm_panel_detach(dsi->panel);
-
-	return 0;
-}
-
-static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
-				   const struct mipi_dsi_msg *msg)
-{
-	bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
-	u32 val = 0;
-
-	if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
-		val |= EN_ACK_RQST;
-	if (lpm)
-		val |= CMD_MODE_ALL_LP;
-
-	dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
-	dsi_write(dsi, DSI_CMD_MODE_CFG, val);
-}
-
-static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
-{
-	int ret;
-	u32 val, mask;
-
-	ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
-				 val, !(val & GEN_CMD_FULL), 1000,
-				 CMD_PKT_STATUS_TIMEOUT_US);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "failed to get available command FIFO\n");
-		return ret;
-	}
-
-	dsi_write(dsi, DSI_GEN_HDR, hdr_val);
-
-	mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
-	ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
-				 val, (val & mask) == mask,
-				 1000, CMD_PKT_STATUS_TIMEOUT_US);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dsi->dev, "failed to write command FIFO\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
-				       const struct mipi_dsi_msg *msg)
-{
-	const u8 *tx_buf = msg->tx_buf;
-	u16 data = 0;
-	u32 val;
-
-	if (msg->tx_len > 0)
-		data |= tx_buf[0];
-	if (msg->tx_len > 1)
-		data |= tx_buf[1] << 8;
-
-	if (msg->tx_len > 2) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "too long tx buf length %zu for short write\n",
-			      msg->tx_len);
-		return -EINVAL;
-	}
-
-	val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
-	return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
-}
-
-static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
-				      const struct mipi_dsi_msg *msg)
-{
-	const u8 *tx_buf = msg->tx_buf;
-	int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
-	u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
-	u32 remainder;
-	u32 val;
-
-	if (msg->tx_len < 3) {
-		DRM_DEV_ERROR(dsi->dev,
-			      "wrong tx buf length %zu for long write\n",
-			      msg->tx_len);
-		return -EINVAL;
-	}
-
-	while (DIV_ROUND_UP(len, pld_data_bytes)) {
-		if (len < pld_data_bytes) {
-			remainder = 0;
-			memcpy(&remainder, tx_buf, len);
-			dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
-			len = 0;
-		} else {
-			memcpy(&remainder, tx_buf, pld_data_bytes);
-			dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
-			tx_buf += pld_data_bytes;
-			len -= pld_data_bytes;
-		}
-
-		ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
-					 val, !(val & GEN_PLD_W_FULL), 1000,
-					 CMD_PKT_STATUS_TIMEOUT_US);
-		if (ret < 0) {
-			DRM_DEV_ERROR(dsi->dev,
-				      "failed to get available write payload FIFO\n");
-			return ret;
-		}
-	}
-
-	return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
-}
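The loop above streams the payload into DSI_GEN_PLD_DATA in 32-bit chunks and zero-pads the tail word. A host-side model of that chunking (assuming a little-endian CPU, matching the driver's memcpy() into a u32):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t buf[] = { 0xb1, 0x11, 0x22, 0x33, 0x44, 0x55 };
	size_t len = sizeof(buf), off = 0;

	while (off < len) {
		uint32_t word = 0;
		size_t chunk = len - off < 4 ? len - off : 4;

		memcpy(&word, buf + off, chunk);	/* short tail stays zero-padded */
		printf("DSI_GEN_PLD_DATA <- 0x%08x\n", (unsigned int)word);
		off += chunk;
	}
	return 0;	/* prints 0x332211b1, then 0x00005544 on little-endian */
}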
-
-static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
-					 const struct mipi_dsi_msg *msg)
-{
-	struct dw_mipi_dsi *dsi = host_to_dsi(host);
-	int ret;
-
-	dw_mipi_message_config(dsi, msg);
-
-	switch (msg->type) {
-	case MIPI_DSI_DCS_SHORT_WRITE:
-	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
-	case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
-		ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
-		break;
-	case MIPI_DSI_DCS_LONG_WRITE:
-		ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
-		break;
-	default:
-		DRM_DEV_ERROR(dsi->dev, "unsupported message type 0x%02x\n",
-			      msg->type);
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
-	.attach = dw_mipi_dsi_host_attach,
-	.detach = dw_mipi_dsi_host_detach,
-	.transfer = dw_mipi_dsi_host_transfer,
-};
-
-static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
-{
-	u32 val;
-
-	val = ENABLE_LOW_POWER;
-
-	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
-		val |= VID_MODE_TYPE_BURST;
-	else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-		val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
-	else
-		val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
-
-	dsi_write(dsi, DSI_VID_MODE_CFG, val);
-}
-
-static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
-				 enum dw_mipi_dsi_mode mode)
-{
-	if (mode == DW_MIPI_DSI_CMD_MODE) {
-		dsi_write(dsi, DSI_PWR_UP, RESET);
-		dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
-		dsi_write(dsi, DSI_PWR_UP, POWERUP);
-	} else {
-		dsi_write(dsi, DSI_PWR_UP, RESET);
-		dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
-		dw_mipi_dsi_video_mode_config(dsi);
-		dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
-		dsi_write(dsi, DSI_PWR_UP, POWERUP);
-	}
-}
-
-static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
-{
-	dsi_write(dsi, DSI_PWR_UP, RESET);
-	dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
-}
-
-static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
-{
-	/*
-	 * The maximum permitted escape clock is 20MHz and it is derived from
-	 * lanebyteclk, which is running at "lane_mbps / 8".  Thus we want:
-	 *
-	 *     (lane_mbps >> 3) / esc_clk_division < 20
-	 * which is:
-	 *     esc_clk_division > (lane_mbps >> 3) / 20
-	 */
-	u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
-
-	dsi_write(dsi, DSI_PWR_UP, RESET);
-	dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
-		  | PHY_RSTZ | PHY_SHUTDOWNZ);
-	dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) |
-		  TX_ESC_CLK_DIVISION(esc_clk_division));
-}
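Worked through for a typical rate: at lane_mbps = 500 the lane byte clock is 62.5 MHz, the code computes (500 >> 3) / 20 + 1 = 4, and 62.5 MHz / 4 = 15.625 MHz stays under the 20 MHz cap; a standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int lane_mbps = 500;	/* illustrative */
	unsigned int div = (lane_mbps >> 3) / 20 + 1;	/* 62 / 20 + 1 = 4 */

	printf("esc_clk_division = %u\n", div);	/* escape clock = 62.5 / 4 = 15.625 MHz */
	return 0;
}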
-
-static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
-				   struct drm_display_mode *mode)
-{
-	u32 val = 0, color = 0;
-
-	switch (dsi->format) {
-	case MIPI_DSI_FMT_RGB888:
-		color = DPI_COLOR_CODING_24BIT;
-		break;
-	case MIPI_DSI_FMT_RGB666:
-		color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
-		break;
-	case MIPI_DSI_FMT_RGB666_PACKED:
-		color = DPI_COLOR_CODING_18BIT_1;
-		break;
-	case MIPI_DSI_FMT_RGB565:
-		color = DPI_COLOR_CODING_16BIT_1;
-		break;
-	}
-
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		val |= VSYNC_ACTIVE_LOW;
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		val |= HSYNC_ACTIVE_LOW;
-
-	dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
-	dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
-	dsi_write(dsi, DSI_DPI_CFG_POL, val);
-	dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
-		  | INVACT_LPCMD_TIME(4));
-}
-
-static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
-{
-	dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
-}
-
-static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
-					    struct drm_display_mode *mode)
-{
-	dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
-}
-
-static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
-{
-	dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
-	dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
-	dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
-}
-
-/* Get lane byte clock cycles. */
-static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
-					   struct drm_display_mode *mode,
-					   u32 hcomponent)
-{
-	u32 frac, lbcc;
-
-	lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
-
-	frac = lbcc % mode->clock;
-	lbcc = lbcc / mode->clock;
-	if (frac)
-		lbcc++;
-
-	return lbcc;
-}
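In other words, the helper converts a horizontal length in pixels into lane-byte-clock ticks by scaling with the ratio of byte clock (lane_mbps * 1000 / 8, in kHz) to pixel clock (mode->clock, in kHz), rounding up. A worked example with 1080p-like numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long htotal = 2200;	/* pixels, illustrative */
	unsigned long long lane_mbps = 1000, clock_khz = 148500;
	unsigned long long lbcc = htotal * lane_mbps * 1000 / 8;

	lbcc = (lbcc + clock_khz - 1) / clock_khz;	/* round up, as the helper does */
	printf("%llu\n", lbcc);		/* 1852 byte-clock cycles per line */
	return 0;
}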
-
-static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
-					  struct drm_display_mode *mode)
-{
-	u32 htotal, hsa, hbp, lbcc;
-
-	htotal = mode->htotal;
-	hsa = mode->hsync_end - mode->hsync_start;
-	hbp = mode->htotal - mode->hsync_end;
-
-	lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal);
-	dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
-
-	lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa);
-	dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
-
-	lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp);
-	dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
-}
-
-static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
-					       struct drm_display_mode *mode)
-{
-	u32 vactive, vsa, vfp, vbp;
-
-	vactive = mode->vdisplay;
-	vsa = mode->vsync_end - mode->vsync_start;
-	vfp = mode->vsync_start - mode->vdisplay;
-	vbp = mode->vtotal - mode->vsync_end;
-
-	dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
-	dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
-	dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
-	dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
-}
-
-static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
-{
-	dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
-		  | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
-
-	dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
-		  | PHY_CLKLP2HS_TIME(0x40));
-}
-
-static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
-{
-	dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
-		  N_LANES(dsi->lanes));
-}
-
-static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
-{
-	dsi_read(dsi, DSI_INT_ST0);
-	dsi_read(dsi, DSI_INT_ST1);
-	dsi_write(dsi, DSI_INT_MSK0, 0);
-	dsi_write(dsi, DSI_INT_MSK1, 0);
-}
-
-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
-	if (dsi->dpms_mode != DRM_MODE_DPMS_ON)
-		return;
-
-	if (clk_prepare_enable(dsi->pclk)) {
-		DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
-		return;
-	}
-
-	drm_panel_disable(dsi->panel);
-
-	dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
-	drm_panel_unprepare(dsi->panel);
-
-	dw_mipi_dsi_disable(dsi);
-	pm_runtime_put(dsi->dev);
-	clk_disable_unprepare(dsi->pclk);
-	dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-}
-
-static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
-{
-	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
-	const struct dw_mipi_dsi_plat_data *pdata = dsi->pdata;
-	int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
-	u32 val;
-	int ret;
-
-	ret = dw_mipi_dsi_get_lane_bps(dsi, mode);
-	if (ret < 0)
-		return;
-
-	if (dsi->dpms_mode == DRM_MODE_DPMS_ON)
-		return;
-
-	if (clk_prepare_enable(dsi->pclk)) {
-		DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
-		return;
-	}
-
-	pm_runtime_get_sync(dsi->dev);
-	dw_mipi_dsi_init(dsi);
-	dw_mipi_dsi_dpi_config(dsi, mode);
-	dw_mipi_dsi_packet_handler_config(dsi);
-	dw_mipi_dsi_video_mode_config(dsi);
-	dw_mipi_dsi_video_packet_config(dsi, mode);
-	dw_mipi_dsi_command_mode_config(dsi);
-	dw_mipi_dsi_line_timer_config(dsi, mode);
-	dw_mipi_dsi_vertical_timing_config(dsi, mode);
-	dw_mipi_dsi_dphy_timing_config(dsi);
-	dw_mipi_dsi_dphy_interface_config(dsi);
-	dw_mipi_dsi_clear_err(dsi);
-
-	/*
-	 * For the RK3399, the GRF clock must be enabled before writing a GRF
-	 * register. On RK3288 and other SoCs, grf_clk is NULL, so
-	 * clk_prepare_enable() returns 0 immediately.
-	 */
-	ret = clk_prepare_enable(dsi->grf_clk);
-	if (ret) {
-		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
-		return;
-	}
-
-	if (pdata->grf_dsi0_mode_reg)
-		regmap_write(dsi->grf_regmap, pdata->grf_dsi0_mode_reg,
-			     pdata->grf_dsi0_mode);
-
-	dw_mipi_dsi_phy_init(dsi);
-	dw_mipi_dsi_wait_for_two_frames(mode);
-
-	dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
-	if (drm_panel_prepare(dsi->panel))
-		DRM_DEV_ERROR(dsi->dev, "failed to prepare panel\n");
-
-	dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
-	drm_panel_enable(dsi->panel);
-
-	clk_disable_unprepare(dsi->pclk);
-
-	if (mux)
-		val = pdata->dsi0_en_bit | (pdata->dsi0_en_bit << 16);
-	else
-		val = pdata->dsi0_en_bit << 16;
-
-	regmap_write(dsi->grf_regmap, pdata->grf_switch_reg, val);
-	DRM_DEV_DEBUG(dsi->dev,
-		      "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
-	dsi->dpms_mode = DRM_MODE_DPMS_ON;
-
-	clk_disable_unprepare(dsi->grf_clk);
-}
-
-static int
-dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
-				 struct drm_crtc_state *crtc_state,
-				 struct drm_connector_state *conn_state)
-{
-	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
-	switch (dsi->format) {
-	case MIPI_DSI_FMT_RGB888:
-		s->output_mode = ROCKCHIP_OUT_MODE_P888;
-		break;
-	case MIPI_DSI_FMT_RGB666:
-		s->output_mode = ROCKCHIP_OUT_MODE_P666;
-		break;
-	case MIPI_DSI_FMT_RGB565:
-		s->output_mode = ROCKCHIP_OUT_MODE_P565;
-		break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	s->output_type = DRM_MODE_CONNECTOR_DSI;
-
-	return 0;
-}
-
-static const struct drm_encoder_helper_funcs
-dw_mipi_dsi_encoder_helper_funcs = {
-	.enable = dw_mipi_dsi_encoder_enable,
-	.disable = dw_mipi_dsi_encoder_disable,
-	.atomic_check = dw_mipi_dsi_encoder_atomic_check,
-};
-
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
-	.destroy = drm_encoder_cleanup,
-};
-
-static int dw_mipi_dsi_connector_get_modes(struct drm_connector *connector)
-{
-	struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
-	return drm_panel_get_modes(dsi->panel);
-}
-
-static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
-	.get_modes = dw_mipi_dsi_connector_get_modes,
-};
-
-static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = dw_mipi_dsi_drm_connector_destroy,
-	.reset = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dw_mipi_dsi_register(struct drm_device *drm,
-				struct dw_mipi_dsi *dsi)
-{
-	struct drm_encoder *encoder = &dsi->encoder;
-	struct drm_connector *connector = &dsi->connector;
-	struct device *dev = dsi->dev;
-	int ret;
-
-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm,
-							     dev->of_node);
-	/*
-	 * If we failed to find the CRTC(s) which this encoder is
-	 * supposed to be connected to, it's because the CRTC has
-	 * not been registered yet.  Defer probing, and hope that
-	 * the required CRTC is added later.
-	 */
-	if (encoder->possible_crtcs == 0)
-		return -EPROBE_DEFER;
-
-	drm_encoder_helper_add(&dsi->encoder,
-			       &dw_mipi_dsi_encoder_helper_funcs);
-	ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
-			       DRM_MODE_ENCODER_DSI, NULL);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "Failed to initialize encoder with drm\n");
-		return ret;
-	}
-
-	drm_connector_helper_add(connector,
-				 &dw_mipi_dsi_connector_helper_funcs);
-
-	drm_connector_init(drm, &dsi->connector,
-			   &dw_mipi_dsi_atomic_connector_funcs,
-			   DRM_MODE_CONNECTOR_DSI);
-
-	drm_connector_attach_encoder(connector, encoder);
-
-	return 0;
-}
-
-static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
-{
-	struct device_node *np = dsi->dev->of_node;
-
-	dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
-	if (IS_ERR(dsi->grf_regmap)) {
-		DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
-		return PTR_ERR(dsi->grf_regmap);
-	}
-
-	return 0;
-}
-
-static struct dw_mipi_dsi_plat_data rk3288_mipi_dsi_drv_data = {
-	.dsi0_en_bit = RK3288_DSI0_SEL_VOP_LIT,
-	.dsi1_en_bit = RK3288_DSI1_SEL_VOP_LIT,
-	.grf_switch_reg = RK3288_GRF_SOC_CON6,
-	.max_data_lanes = 4,
-};
-
-static struct dw_mipi_dsi_plat_data rk3399_mipi_dsi_drv_data = {
-	.dsi0_en_bit = RK3399_DSI0_SEL_VOP_LIT,
-	.dsi1_en_bit = RK3399_DSI1_SEL_VOP_LIT,
-	.grf_switch_reg = RK3399_GRF_SOC_CON20,
-	.grf_dsi0_mode = RK3399_GRF_DSI_MODE,
-	.grf_dsi0_mode_reg = RK3399_GRF_SOC_CON22,
-	.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
-	.max_data_lanes = 4,
-};
-
-static const struct of_device_id dw_mipi_dsi_dt_ids[] = {
-	{
-	 .compatible = "rockchip,rk3288-mipi-dsi",
-	 .data = &rk3288_mipi_dsi_drv_data,
-	}, {
-	 .compatible = "rockchip,rk3399-mipi-dsi",
-	 .data = &rk3399_mipi_dsi_drv_data,
-	},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, dw_mipi_dsi_dt_ids);
-
-static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
-			    void *data)
-{
-	const struct of_device_id *of_id =
-			of_match_device(dw_mipi_dsi_dt_ids, dev);
-	const struct dw_mipi_dsi_plat_data *pdata = of_id->data;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct reset_control *apb_rst;
-	struct drm_device *drm = data;
-	struct dw_mipi_dsi *dsi;
-	struct resource *res;
-	int ret;
-
-	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
-	if (!dsi)
-		return -ENOMEM;
-
-	dsi->dev = dev;
-	dsi->pdata = pdata;
-	dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-
-	ret = rockchip_mipi_parse_dt(dsi);
-	if (ret)
-		return ret;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dsi->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(dsi->base))
-		return PTR_ERR(dsi->base);
-
-	dsi->pllref_clk = devm_clk_get(dev, "ref");
-	if (IS_ERR(dsi->pllref_clk)) {
-		ret = PTR_ERR(dsi->pllref_clk);
-		DRM_DEV_ERROR(dev,
-			      "Unable to get pll reference clock: %d\n", ret);
-		return ret;
-	}
-
-	dsi->pclk = devm_clk_get(dev, "pclk");
-	if (IS_ERR(dsi->pclk)) {
-		ret = PTR_ERR(dsi->pclk);
-		DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
-		return ret;
-	}
-
-	/*
-	 * Note that the reset was not defined in the initial device tree, so
-	 * we have to be prepared for it not being found.
-	 */
-	apb_rst = devm_reset_control_get(dev, "apb");
-	if (IS_ERR(apb_rst)) {
-		ret = PTR_ERR(apb_rst);
-		if (ret == -ENOENT) {
-			apb_rst = NULL;
-		} else {
-			DRM_DEV_ERROR(dev,
-				      "Unable to get reset control: %d\n", ret);
-			return ret;
-		}
-	}
-
-	if (apb_rst) {
-		ret = clk_prepare_enable(dsi->pclk);
-		if (ret) {
-			DRM_DEV_ERROR(dev, "Failed to enable pclk\n");
-			return ret;
-		}
-
-		reset_control_assert(apb_rst);
-		usleep_range(10, 20);
-		reset_control_deassert(apb_rst);
-
-		clk_disable_unprepare(dsi->pclk);
-	}
-
-	if (pdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
-		dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
-		if (IS_ERR(dsi->phy_cfg_clk)) {
-			ret = PTR_ERR(dsi->phy_cfg_clk);
-			DRM_DEV_ERROR(dev,
-				      "Unable to get phy_cfg_clk: %d\n", ret);
-			return ret;
-		}
-	}
-
-	if (pdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
-		dsi->grf_clk = devm_clk_get(dev, "grf");
-		if (IS_ERR(dsi->grf_clk)) {
-			ret = PTR_ERR(dsi->grf_clk);
-			DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
-			return ret;
-		}
-	}
-
-	ret = clk_prepare_enable(dsi->pllref_clk);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "Failed to enable pllref_clk\n");
-		return ret;
-	}
-
-	ret = dw_mipi_dsi_register(drm, dsi);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "Failed to register mipi_dsi: %d\n", ret);
-		goto err_pllref;
-	}
-
-	dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
-	dsi->dsi_host.dev = dev;
-	ret = mipi_dsi_host_register(&dsi->dsi_host);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
-		goto err_cleanup;
-	}
-
-	if (!dsi->panel) {
-		ret = -EPROBE_DEFER;
-		goto err_mipi_dsi_host;
-	}
-
-	dev_set_drvdata(dev, dsi);
-	pm_runtime_enable(dev);
-	return 0;
-
-err_mipi_dsi_host:
-	mipi_dsi_host_unregister(&dsi->dsi_host);
-err_cleanup:
-	dsi->connector.funcs->destroy(&dsi->connector);
-	dsi->encoder.funcs->destroy(&dsi->encoder);
-err_pllref:
-	clk_disable_unprepare(dsi->pllref_clk);
-	return ret;
-}
-
-static void dw_mipi_dsi_unbind(struct device *dev, struct device *master,
-			       void *data)
-{
-	struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
-
-	mipi_dsi_host_unregister(&dsi->dsi_host);
-	pm_runtime_disable(dev);
-
-	dsi->connector.funcs->destroy(&dsi->connector);
-	dsi->encoder.funcs->destroy(&dsi->encoder);
-
-	clk_disable_unprepare(dsi->pllref_clk);
-}
-
-static const struct component_ops dw_mipi_dsi_ops = {
-	.bind	= dw_mipi_dsi_bind,
-	.unbind	= dw_mipi_dsi_unbind,
-};
-
-static int dw_mipi_dsi_probe(struct platform_device *pdev)
-{
-	return component_add(&pdev->dev, &dw_mipi_dsi_ops);
-}
-
-static int dw_mipi_dsi_remove(struct platform_device *pdev)
-{
-	component_del(&pdev->dev, &dw_mipi_dsi_ops);
-	return 0;
-}
-
-struct platform_driver dw_mipi_dsi_driver = {
-	.probe		= dw_mipi_dsi_probe,
-	.remove		= dw_mipi_dsi_remove,
-	.driver		= {
-		.of_match_table = dw_mipi_dsi_dt_ids,
-		.name	= DRIVER_NAME,
-	},
-};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 11309a2..89c63cf 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -11,6 +11,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/phy/phy.h>
 #include <linux/regmap.h>
 
 #include <drm/drm_of.h>
@@ -24,6 +25,24 @@
 
 #define RK3288_GRF_SOC_CON6		0x025C
 #define RK3288_HDMI_LCDC_SEL		BIT(4)
+#define RK3328_GRF_SOC_CON2		0x0408
+
+#define RK3328_HDMI_SDAIN_MSK		BIT(11)
+#define RK3328_HDMI_SCLIN_MSK		BIT(10)
+#define RK3328_HDMI_HPD_IOE		BIT(2)
+#define RK3328_GRF_SOC_CON3		0x040c
+/* needs to be unset if hdmi or i2c should control voltage */
+#define RK3328_HDMI_SDA5V_GRF		BIT(15)
+#define RK3328_HDMI_SCL5V_GRF		BIT(14)
+#define RK3328_HDMI_HPD5V_GRF		BIT(13)
+#define RK3328_HDMI_CEC5V_GRF		BIT(12)
+#define RK3328_GRF_SOC_CON4		0x0410
+#define RK3328_HDMI_HPD_SARADC		BIT(13)
+#define RK3328_HDMI_CEC_5V		BIT(11)
+#define RK3328_HDMI_SDA_5V		BIT(10)
+#define RK3328_HDMI_SCL_5V		BIT(9)
+#define RK3328_HDMI_HPD_5V		BIT(8)
+
 #define RK3399_GRF_SOC_CON20		0x6250
 #define RK3399_HDMI_LCDC_SEL		BIT(6)
 
@@ -36,7 +55,7 @@
  * @lcdsel_lit: reg value of selecting vop little for HDMI
  */
 struct rockchip_hdmi_chip_data {
-	u32	lcdsel_grf_reg;
+	int	lcdsel_grf_reg;
 	u32	lcdsel_big;
 	u32	lcdsel_lit;
 };
@@ -49,6 +68,7 @@ struct rockchip_hdmi {
 	struct clk *vpll_clk;
 	struct clk *grf_clk;
 	struct dw_hdmi *hdmi;
+	struct phy *phy;
 };
 
 #define to_rockchip_hdmi(x)	container_of(x, struct rockchip_hdmi, x)
@@ -245,6 +265,9 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
 	u32 val;
 	int ret;
 
+	if (hdmi->chip_data->lcdsel_grf_reg < 0)
+		return;
+
 	ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
 	if (ret)
 		val = hdmi->chip_data->lcdsel_lit;
@@ -287,6 +310,66 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun
 	.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
 };
 
+static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
+			     struct drm_display_mode *mode)
+{
+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+	return phy_power_on(hdmi->phy);
+}
+
+static void dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
+{
+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+	phy_power_off(hdmi->phy);
+}
+
+static enum drm_connector_status
+dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+	enum drm_connector_status status;
+
+	status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
+
+	if (status == connector_status_connected)
+		regmap_write(hdmi->regmap,
+			RK3328_GRF_SOC_CON4,
+			HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
+				      RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+	else
+		regmap_write(hdmi->regmap,
+			RK3328_GRF_SOC_CON4,
+			HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
+					 RK3328_HDMI_SCL_5V));
+	return status;
+}
+
+static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+	dw_hdmi_phy_setup_hpd(dw_hdmi, data);
+
+	/* Enable and map pins to 3V grf-controlled io-voltage */
+	regmap_write(hdmi->regmap,
+		RK3328_GRF_SOC_CON4,
+		HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
+				 RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
+				 RK3328_HDMI_HPD_5V));
+	regmap_write(hdmi->regmap,
+		RK3328_GRF_SOC_CON3,
+		HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
+				 RK3328_HDMI_HPD5V_GRF |
+				 RK3328_HDMI_CEC5V_GRF));
+	regmap_write(hdmi->regmap,
+		RK3328_GRF_SOC_CON2,
+		HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
+			      RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
+			      RK3328_HDMI_HPD_IOE));
+}
+
 static struct rockchip_hdmi_chip_data rk3288_chip_data = {
 	.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
 	.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
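The regmap_write() calls in the hunk above lean on the Rockchip GRF write-mask convention: the upper 16 bits of the written word select which of the lower 16 bits take effect, so individual bits can be flipped without a read-modify-write. HIWORD_UPDATE is commonly defined along these lines (treat the exact definition here as illustrative):

/* upper half = write-enable mask, lower half = new bit values */
#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

/* so HIWORD_UPDATE(0, m) clears exactly the bits in m, and
 * HIWORD_UPDATE(m, m) sets them, leaving all other bits untouched.
 */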
@@ -301,6 +384,29 @@ static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
 	.phy_data = &rk3288_chip_data,
 };
 
+static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+	.init		= dw_hdmi_rockchip_genphy_init,
+	.disable	= dw_hdmi_rockchip_genphy_disable,
+	.read_hpd	= dw_hdmi_rk3328_read_hpd,
+	.update_hpd	= dw_hdmi_phy_update_hpd,
+	.setup_hpd	= dw_hdmi_rk3328_setup_hpd,
+};
+
+static struct rockchip_hdmi_chip_data rk3328_chip_data = {
+	.lcdsel_grf_reg = -1,
+};
+
+static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
+	.mode_valid = dw_hdmi_rockchip_mode_valid,
+	.mpll_cfg = rockchip_mpll_cfg,
+	.cur_ctr = rockchip_cur_ctr,
+	.phy_config = rockchip_phy_config,
+	.phy_data = &rk3328_chip_data,
+	.phy_ops = &rk3328_hdmi_phy_ops,
+	.phy_name = "inno_dw_hdmi_phy2",
+	.phy_force_vendor = true,
+};
+
 static struct rockchip_hdmi_chip_data rk3399_chip_data = {
 	.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
 	.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
@@ -319,6 +425,9 @@ static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
 	{ .compatible = "rockchip,rk3288-dw-hdmi",
 	  .data = &rk3288_hdmi_drv_data
 	},
+	{ .compatible = "rockchip,rk3328-dw-hdmi",
+	  .data = &rk3328_hdmi_drv_data
+	},
 	{ .compatible = "rockchip,rk3399-dw-hdmi",
 	  .data = &rk3399_hdmi_drv_data
 	},
@@ -330,7 +439,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 				 void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	const struct dw_hdmi_plat_data *plat_data;
+	struct dw_hdmi_plat_data *plat_data;
 	const struct of_device_id *match;
 	struct drm_device *drm = data;
 	struct drm_encoder *encoder;
@@ -345,9 +454,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return -ENOMEM;
 
 	match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
-	plat_data = match->data;
+	plat_data = devm_kmemdup(&pdev->dev, match->data,
+					     sizeof(*plat_data), GFP_KERNEL);
+	if (!plat_data)
+		return -ENOMEM;
+
 	hdmi->dev = &pdev->dev;
 	hdmi->chip_data = plat_data->phy_data;
+	plat_data->phy_data = hdmi;
 	encoder = &hdmi->encoder;
 
 	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -373,6 +487,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
+	hdmi->phy = devm_phy_optional_get(dev, "hdmi");
+	if (IS_ERR(hdmi->phy)) {
+		ret = PTR_ERR(hdmi->phy);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
+		return ret;
+	}
+
 	drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
 	drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 5864cb4..be6c257 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -483,7 +483,7 @@ static int __init rockchip_drm_init(void)
 	ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
 	ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
 				CONFIG_ROCKCHIP_DW_HDMI);
-	ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_driver,
+	ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
 				CONFIG_ROCKCHIP_DW_MIPI_DSI);
 	ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 21a023a..ce48568 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -37,6 +37,7 @@ struct rockchip_crtc_state {
 	int output_type;
 	int output_mode;
 	int output_bpc;
+	int output_flags;
 };
 #define to_rockchip_crtc_state(s) \
 		container_of(s, struct rockchip_crtc_state, base)
@@ -67,7 +68,7 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
 int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
 extern struct platform_driver cdn_dp_driver;
 extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
-extern struct platform_driver dw_mipi_dsi_driver;
+extern struct platform_driver dw_mipi_dsi_rockchip_driver;
 extern struct platform_driver inno_hdmi_driver;
 extern struct platform_driver rockchip_dp_driver;
 extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 79d00d8..01ff3c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
 			int (*psr_set)(struct drm_encoder *, bool enable))
 {
-	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct rockchip_drm_private *drm_drv;
 	struct psr_drv *psr;
 
 	if (!encoder || !psr_set)
 		return -EINVAL;
 
+	drm_drv = encoder->dev->dev_private;
+
 	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
 	if (!psr)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0c35a88..fb70fb4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,6 +916,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
 	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
 		   BIT(VSYNC_POSITIVE) : 0;
 	VOP_REG_SET(vop, output, pin_pol, pin_pol);
+	VOP_REG_SET(vop, output, mipi_dual_channel_en, 0);
 
 	switch (s->output_type) {
 	case DRM_MODE_CONNECTOR_LVDS:
@@ -933,6 +934,8 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
 	case DRM_MODE_CONNECTOR_DSI:
 		VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
 		VOP_REG_SET(vop, output, mipi_en, 1);
+		VOP_REG_SET(vop, output, mipi_dual_channel_en,
+			    !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 		pin_pol &= ~BIT(DCLK_INVERT);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index fd5765d..0fe40e1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -60,6 +60,7 @@ struct vop_output {
 	struct vop_reg edp_en;
 	struct vop_reg hdmi_en;
 	struct vop_reg mipi_en;
+	struct vop_reg mipi_dual_channel_en;
 	struct vop_reg rgb_en;
 };
 
@@ -214,6 +215,9 @@ struct vop_data {
 /* for use special outface */
 #define ROCKCHIP_OUT_MODE_AAAA	15
 
+/* output flags */
+#define ROCKCHIP_OUTPUT_DSI_DUAL	BIT(0)
+
 enum alpha_mode {
 	ALPHA_STRAIGHT,
 	ALPHA_INVERSE,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index a6db3cd..08fc40a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -361,7 +361,11 @@ static const struct vop_win_data rk3188_vop_win_data[] = {
 };
 
 static const int rk3188_vop_intrs[] = {
-	0,
+	/*
+	 * hs_start interrupt fires at frame-start, so serves
+	 * the same purpose as dsp_hold in the driver.
+	 */
+	DSP_HOLD_VALID_INTR,
 	FS_INTR,
 	LINE_FLAG_INTR,
 	BUS_ERROR_INTR,
@@ -630,6 +634,7 @@ static const struct vop_output rk3399_output = {
 	.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
 	.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
 	.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
+	.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
 };
 
 static const struct vop_data rk3399_vop_big = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54..4463d38 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 	int i;
 
 	for (i = 0; i < entity->num_rq_list; ++i) {
-		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+		if (!entity->rq_list[i]->sched->ready) {
+			DRM_WARN("sched%s is not ready, skipping", sched->name);
+			continue;
+		}
+
+		num_jobs = atomic_read(&sched->num_jobs);
 		if (num_jobs < min_jobs) {
 			min_jobs = num_jobs;
 			rq = entity->rq_list[i];
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 	drm_sched_fence_finished(job->s_fence);
 	WARN_ON(job->s_fence->parent);
-	dma_fence_put(&job->s_fence->finished);
 	job->sched->ops->free_job(job);
 }
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587..dbb6906 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,6 +60,8 @@
 
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
+
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -196,6 +198,75 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+	mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
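A driver would typically call this from the interrupt path that detects the hardware fault, so the normal timeout machinery performs recovery right away instead of waiting for the watchdog to expire. A hedged sketch (the handler and its wiring are invented for illustration):

/* Illustrative only: kick timeout handling from a fault IRQ */
static irqreturn_t my_gpu_fault_irq(int irq, void *arg)
{
	struct drm_gpu_scheduler *sched = arg;

	drm_sched_fault(sched);	/* runs work_tdr, i.e. timedout_job, at once */
	return IRQ_HANDLED;
}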
+
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrary large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ *
+ * Returns: the remaining timeout.
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+	unsigned long sched_timeout, now = jiffies;
+
+	sched_timeout = sched->work_tdr.timer.expires;
+
+	/*
+	 * Modify the timeout to an arbitrarily large value. This also prevents
+	 * the timeout from being restarted when new submissions arrive
+	 */
+	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+			&& time_after(sched_timeout, now))
+		return sched_timeout - now;
+	else
+		return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+		unsigned long remaining)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+
+	if (list_empty(&sched->ring_mirror_list))
+		cancel_delayed_work(&sched->work_tdr);
+	else
+		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
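Together these two helpers let a driver park the job watchdog across a window where the hardware is legitimately unresponsive (for example a controlled engine reset) and then restore whatever time was left on the clock. A minimal sketch with hypothetical driver names:

unsigned long remaining;

remaining = drm_sched_suspend_timeout(&ring->sched);
my_engine_do_reset(ring);	/* invented hook; may take longer than sched->timeout */
drm_sched_resume_timeout(&ring->sched, remaining);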
+
 /* job_finish is called after the hw fence has signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -203,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
 	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
 						   finish_work);
 	struct drm_gpu_scheduler *sched = s_job->sched;
+	unsigned long flags;
 
 	/*
 	 * Canceling the timeout without removing our job from the ring mirror
@@ -213,14 +285,13 @@ static void drm_sched_job_finish(struct work_struct *work)
 	 */
 	cancel_delayed_work_sync(&sched->work_tdr);
 
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	/* remove job from ring_mirror_list */
-	list_del(&s_job->node);
+	list_del_init(&s_job->node);
 	/* queue TDR for next job */
 	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
-	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
 
@@ -235,55 +306,33 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
 	struct drm_gpu_scheduler *sched = s_job->sched;
+	unsigned long flags;
 
 	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
 			       drm_sched_job_finish_cb);
 
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
-	int r;
+	unsigned long flags;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
-	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
-			goto already_signaled;
-	}
-
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
-	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		sched->ops->timedout_job(job);
+		job->sched->ops->timedout_job(job);
 
-	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!fence->parent || !list_empty(&fence->cb.node))
-			continue;
-
-		r = dma_fence_add_callback(fence->parent, &fence->cb,
-					   drm_sched_process_job);
-		if (r)
-			drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
-		;
-	}
-	spin_unlock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	drm_sched_start_timeout(sched);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 
 /**
@@ -297,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 {
 	struct drm_sched_job *s_job;
 	struct drm_sched_entity *entity, *tmp;
+	unsigned long flags;
 	int i;
 
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
@@ -309,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 			atomic_dec(&sched->hw_rq_count);
 		}
 	}
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
 	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
@@ -347,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_job *s_job, *tmp;
 	bool found_guilty = false;
+	unsigned long flags;
 	int r;
 
-	spin_lock(&sched->job_list_lock);
+	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
@@ -363,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
-		spin_unlock(&sched->job_list_lock);
+		spin_unlock_irqrestore(&sched->job_list_lock, flags);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 
@@ -378,12 +429,14 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 					  r);
 			dma_fence_put(fence);
 		} else {
+			if (s_fence->finished.error < 0)
+				drm_sched_expel_job_unlocked(s_job);
 			drm_sched_process_job(NULL, &s_fence->cb);
 		}
-		spin_lock(&sched->job_list_lock);
+		spin_lock_irqsave(&sched->job_list_lock, flags);
 	}
 	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
@@ -406,6 +459,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
 	struct drm_gpu_scheduler *sched;
 
 	drm_sched_entity_select_rq(entity);
+	if (!entity->rq)
+		return -ENOENT;
+
 	sched = entity->rq->sched;
 
 	job->sched = sched;
@@ -424,6 +480,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+	job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
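With the dma_fence_put() of the finished fence removed from the core paths (see the sched_entity.c hunk above and drm_sched_job_finish()), dropping that reference is now the driver's responsibility, typically from its free_job callback. A minimal sketch with an invented job wrapper:

struct my_job {
	struct drm_sched_job base;	/* embedded scheduler job */
	/* driver-private state ... */
};

static void my_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	drm_sched_job_cleanup(sched_job);	/* puts the finished fence */
	kfree(job);
}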
+
+/**
  * drm_sched_ready - is the scheduler ready
  *
  * @sched: scheduler instance
@@ -567,6 +635,8 @@ static int drm_sched_main(void *param)
 					  r);
 			dma_fence_put(fence);
 		} else {
+			if (s_fence->finished.error < 0)
+				drm_sched_expel_job_unlocked(sched_job);
 			drm_sched_process_job(NULL, &s_fence->cb);
 		}
 
@@ -575,6 +645,15 @@ static int drm_sched_main(void *param)
 	return 0;
 }
 
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
+{
+	struct drm_gpu_scheduler *sched = s_job->sched;
+
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_init - Init a gpu scheduler instance
  *
@@ -594,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   long timeout,
 		   const char *name)
 {
-	int i;
+	int i, ret;
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
@@ -615,10 +694,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	/* Each scheduler will run on a separate kernel thread */
 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
+		ret = PTR_ERR(sched->thread);
+		sched->thread = NULL;
 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
-		return PTR_ERR(sched->thread);
+		return ret;
 	}
 
+	sched->ready = true;
 	return 0;
 }
 EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +716,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+
+	sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 9fc349f..1bb73dc 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1 +1,5 @@
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o
+test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
+                      test-drm_format.o test-drm_framebuffer.o \
+		      test-drm_damage_helper.o
+
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o
diff --git a/drivers/gpu/drm/selftests/drm_helper_selftests.h b/drivers/gpu/drm/selftests/drm_helper_selftests.h
deleted file mode 100644
index 9771290..0000000
--- a/drivers/gpu/drm/selftests/drm_helper_selftests.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_selftests_helper
- */
-selftest(check_plane_state, igt_check_plane_state)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
new file mode 100644
index 0000000..4647537
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as igt__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * Tests are executed in order by igt/drm_selftests_helper
+ */
+selftest(check_plane_state, igt_check_plane_state)
+selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
+selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
+selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
+selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
+selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
+selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
+selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
+selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
+selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
+selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
+selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
+selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
+selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
+selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
+selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
+selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
+selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
+selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
+selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
+selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
+selftest(damage_iter_damage, igt_damage_iter_damage)
+selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
+selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
+selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
+selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
new file mode 100644
index 0000000..9d2bcdf
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_damage_helper: " fmt
+
+#include <drm/drm_damage_helper.h>
+
+#include "test-drm_modeset_common.h"
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+			  int y2)
+{
+	state->src.x1 = x1;
+	state->src.y1 = y1;
+	state->src.x2 = x2;
+	state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+			    int y2)
+{
+	r->x1 = x1;
+	r->y1 = y1;
+	r->x2 = x2;
+	r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+			    struct drm_mode_rect *r, uint32_t size)
+{
+	damage_blob->length = size;
+	damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+			     struct drm_property_blob *damage_blob)
+{
+	state->fb_damage_clips = damage_blob;
+}
+
+static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
+			      int x1, int y1, int x2, int y2)
+{
+	/*
+	 * Round down x1/y1 and round up x2/y2. Damage clips are in whole
+	 * pixels while the plane src is in 16.16 fixed point, so round
+	 * outward to cover every affected pixel.
+	 */
+	int src_x1 = state->src.x1 >> 16;
+	int src_y1 = state->src.y1 >> 16;
+	int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+	int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
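+	/*
+	 * Example: src.x1 = 0x3fffe (~3.99998 in 16.16) rounds down to
+	 * src_x1 = 3, while src.x2 = 0x3fffe + (1024 << 16) rounds up to
+	 * src_x2 = 1028.
+	 */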
+
+	if (x1 >= x2 || y1 >= y2) {
+		pr_err("Cannot have damage clip with no dimension.\n");
+		return false;
+	}
+
+	if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
+		pr_err("Damage cannot be outside rounded plane src.\n");
+		return false;
+	}
+
+	if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
+		pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
+		return false;
+	}
+
+	return true;
+}
+
+int igt_damage_iter_no_damage(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src same as fb size. */
+	set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
+	set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src has fractional part. */
+	set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	set_plane_src(&state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_src_moved(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src moved since old plane state. */
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 10 << 16, 10 << 16,
+		      (10 + 1024) << 16, (10 + 768) << 16);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src has fractional part and it moved since old plane state. */
+	set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_not_visible(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = false,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should have no damage.");
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_no_crtc(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = NULL,
+		.fb = &fb,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should have no damage.");
+
+	return 0;
+}
+
+int igt_damage_iter_no_damage_no_fb(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = NULL,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should have no damage.");
+
+	return 0;
+}
+
+int igt_damage_iter_simple_damage(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage set to plane src */
+	set_damage_clip(&damage, 0, 0, 1024, 768);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage when set.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	set_damage_clip(&damage, 256, 192, 768, 576);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage when set.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage intersect with plane src. */
+	set_damage_clip(&damage, 256, 192, 1360, 768);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage clipped to src.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_outside_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage clip outside plane src */
+	set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should have no damage.");
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src has fractional part. */
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_damage_clip(&damage, 10, 10, 256, 330);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage when set.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src has fractional part. */
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage intersect with plane src. */
+	set_damage_clip(&damage, 10, 1, 1360, 330);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src has fractional part. */
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage clip outside plane src */
+	set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should have no damage.");
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_src_moved(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src moved since old plane state. */
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 10 << 16, 10 << 16,
+		      (10 + 1024) << 16, (10 + 768) << 16);
+	set_damage_clip(&damage, 20, 30, 256, 256);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+	return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage;
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	/* Plane src with fractional part moved since old plane state. */
+	set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage intersect with plane src. */
+	set_damage_clip(&damage, 20, 30, 1360, 256);
+	set_damage_blob(&damage_blob, &damage, sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+	return 0;
+}
+
+int igt_damage_iter_damage(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage[2];
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	/* 2 damage clips. */
+	set_damage_clip(&damage[0], 20, 30, 200, 180);
+	set_damage_clip(&damage[1], 240, 200, 280, 250);
+	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		if (num_hits == 0)
+			FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+		if (num_hits == 1)
+			FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+		num_hits++;
+	}
+
+	FAIL(num_hits != 2, "Should return damage when set.");
+
+	return 0;
+}
+
+int igt_damage_iter_damage_one_intersect(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage[2];
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* 2 damage clips, one intersect plane src. */
+	set_damage_clip(&damage[0], 20, 30, 200, 180);
+	set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		if (num_hits == 0)
+			FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+		if (num_hits == 1)
+			FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+		num_hits++;
+	}
+
+	FAIL(num_hits != 2, "Should return damage when set.");
+
+	return 0;
+}
+
+int igt_damage_iter_damage_one_outside(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage[2];
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+	set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+	/* 2 damage clips, one outside plane src. */
+	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+	set_damage_clip(&damage[1], 240, 200, 280, 250);
+	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return damage when set.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+
+	return 0;
+}
+
+int igt_damage_iter_damage_src_moved(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage[2];
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = true,
+	};
+
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	/* 2 damage clips, one outside plane src. */
+	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+	set_damage_clip(&damage[1], 240, 200, 280, 250);
+	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 1, "Should return round off plane src as damage.");
+	FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+	return 0;
+}
+
+int igt_damage_iter_damage_not_visible(void *ignored)
+{
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_plane_state old_state;
+	struct drm_property_blob damage_blob;
+	struct drm_mode_rect damage[2];
+	struct drm_rect clip;
+	uint32_t num_hits = 0;
+
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+
+	struct drm_plane_state state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.visible = false,
+	};
+
+	set_plane_src(&old_state, 0x40002, 0x40002,
+		      0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	set_plane_src(&state, 0x3fffe, 0x3fffe,
+		      0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+	/* 2 damage clips, one outside plane src. */
+	set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+	set_damage_clip(&damage[1], 240, 200, 280, 250);
+	set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+	set_plane_damage(&state, &damage_blob);
+	drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	FAIL(num_hits != 0, "Should not return any damage.");
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_format.c b/drivers/gpu/drm/selftests/test-drm_format.c
new file mode 100644
index 0000000..c5e212a
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_format.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ */
+
+#define pr_fmt(fmt) "drm_format: " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_check_drm_format_block_width(void *ignored)
+{
+	const struct drm_format_info *info = NULL;
+
+	/* Test invalid arguments */
+	FAIL_ON(drm_format_info_block_width(info, 0) != 0);
+	FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+	FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+
+	/* Test 1 plane format */
+	info = drm_format_info(DRM_FORMAT_XRGB4444);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+	FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+	/* Test 2 planes format */
+	info = drm_format_info(DRM_FORMAT_NV12);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 2) != 0);
+	FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+	/* Test 3 planes format */
+	info = drm_format_info(DRM_FORMAT_YUV422);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 2) != 1);
+	FAIL_ON(drm_format_info_block_width(info, 3) != 0);
+	FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+	/* Test a tiled format */
+	info = drm_format_info(DRM_FORMAT_X0L0);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_width(info, 0) != 2);
+	FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+	FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+	return 0;
+}
+
+int igt_check_drm_format_block_height(void *ignored)
+{
+	const struct drm_format_info *info = NULL;
+
+	/* Test invalid arguments */
+	FAIL_ON(drm_format_info_block_height(info, 0) != 0);
+	FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+	FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+
+	/* Test 1 plane format */
+	info = drm_format_info(DRM_FORMAT_XRGB4444);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+	FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+	/* Test 2 planes format */
+	info = drm_format_info(DRM_FORMAT_NV12);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 2) != 0);
+	FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+	/* Test 3 planes format */
+	info = drm_format_info(DRM_FORMAT_YUV422);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 2) != 1);
+	FAIL_ON(drm_format_info_block_height(info, 3) != 0);
+	FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+	/* Test a tiled format */
+	info = drm_format_info(DRM_FORMAT_X0L0);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_block_height(info, 0) != 2);
+	FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+	FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+	return 0;
+}
+
+int igt_check_drm_format_min_pitch(void *ignored)
+{
+	const struct drm_format_info *info = NULL;
+
+	/* Test invalid arguments */
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	/* Test 1 plane 8 bits per pixel format */
+	info = drm_format_info(DRM_FORMAT_RGB332);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
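+	/*
+	 * drm_format_info_min_pitch() returns a u64, so pitches wider
+	 * than 32 bits must not truncate.
+	 */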
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+			(uint64_t)(UINT_MAX - 1));
+
+	/* Test 1 plane 16 bits per pixel format */
+	info = drm_format_info(DRM_FORMAT_XRGB4444);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX * 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+			(uint64_t)(UINT_MAX - 1) * 2);
+
+	/* Test 1 plane 24 bits per pixel format */
+	info = drm_format_info(DRM_FORMAT_RGB888);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 3);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 6);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1920);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 3072);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 5760);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 12288);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2013);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX * 3);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+			(uint64_t)(UINT_MAX - 1) * 3);
+
+	/* Test 1 plane 32 bits per pixel format */
+	info = drm_format_info(DRM_FORMAT_ABGR8888);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 4);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 8);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 2560);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 4096);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 7680);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 16384);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2684);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX * 4);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+			(uint64_t)(UINT_MAX - 1) * 4);
+
+	/* Test 2 planes format */
+	info = drm_format_info(DRM_FORMAT_NV12);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 640);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 1024);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 1920);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 4096);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 672);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+			(uint64_t)UINT_MAX + 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+			(uint64_t)(UINT_MAX - 1));
+	FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+			(uint64_t)(UINT_MAX - 1));
+
+	/* Test 3 planes 8 bits per pixel format */
+	info = drm_format_info(DRM_FORMAT_YUV422);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 3, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 1) != 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 2) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 2) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 320);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 320) != 320);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 512);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 512) != 512);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 960);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 960) != 960);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 2048);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 2048) != 2048);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 336);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, 336) != 336);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+			(uint64_t)UINT_MAX / 2 + 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1) !=
+			(uint64_t)UINT_MAX / 2 + 1);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2) !=
+			(uint64_t)(UINT_MAX - 1) / 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+			(uint64_t)(UINT_MAX - 1) / 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2) !=
+			(uint64_t)(UINT_MAX - 1) / 2);
+
+	/* Test tiled format */
+	info = drm_format_info(DRM_FORMAT_X0L2);
+	FAIL_ON(!info);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+	FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+			(uint64_t)UINT_MAX * 2);
+	FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+			(uint64_t)(UINT_MAX - 1) * 2);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
new file mode 100644
index 0000000..a04d02d
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_framebuffer functions
+ */
+
+#include <drm/drmP.h>
+#include "../drm_crtc_internal.h"
+
+#include "test-drm_modeset_common.h"
+
+#define MIN_WIDTH 4
+#define MAX_WIDTH 4096
+#define MIN_HEIGHT 4
+#define MAX_HEIGHT 4096
+
+struct drm_framebuffer_test {
+	int buffer_created;
+	struct drm_mode_fb_cmd2 cmd;
+	const char *name;
+};
+
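+/*
+ * Table-driven cases: buffer_created records whether the command is
+ * expected to pass core validation and reach the driver's fb_create
+ * hook (1) or be rejected before that (0).
+ */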
+static struct drm_framebuffer_test createbuffer_tests[] = {
+{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 max sizes",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 pitch greater than min required",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 pitch less than min required",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid width",
+	.cmd = { .width = MAX_WIDTH + 1, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer handle",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "No pixel format",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = 0,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Width 0",
+	.cmd = { .width = 0, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Height 0",
+	.cmd = { .width = MAX_WIDTH, .height = 0, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+		 .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+		 .flags = DRM_MODE_FB_MODIFIERS, .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+		 .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+		 .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+		 .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "NV12 Normal sizes",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .pitches = { 600, 600, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "NV12 Max sizes",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 Invalid pitch",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+		 .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 different  modifier per-plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+		 .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+		 .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+						       DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+		 .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 Modifier for inexistent plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+			       DRM_FORMAT_MOD_SAMSUNG_64_32_TILE },
+		 .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+		 .handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .pitches = { 600, 300, 300 },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 Max sizes",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2),
+						      DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Invalid pitch",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) - 1,
+						      DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 Different pitches",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+						      DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH + MAX_WIDTH * MAX_HEIGHT,
+						      MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1, DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 1, .name = "YVU420 Valid modifier",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Different modifiers per plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_YTR,
+			       AFBC_FORMAT_MOD_SPARSE },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+		 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+			       AFBC_FORMAT_MOD_SPARSE },
+		 .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+	}
+},
+{ .buffer_created = 1, .name = "X0L2 Normal sizes",
+	.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
+	}
+},
+{ .buffer_created = 1, .name = "X0L2 Max sizes",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH, 0, 0 }
+	}
+},
+{ .buffer_created = 0, .name = "X0L2 Invalid pitch",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH - 1, 0, 0 }
+	}
+},
+{ .buffer_created = 1, .name = "X0L2 Pitch greater than minimum required",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+	}
+},
+{ .buffer_created = 0, .name = "X0L2 Handle for inexistent plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+		 .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+	}
+},
+{ .buffer_created = 1, .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
+		 .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+	}
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier without DRM_MODE_FB_MODIFIERS set",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+	}
+},
+{ .buffer_created = 1, .name = "X0L2 Valid modifier",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+		 .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+	}
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier for inexistent plane",
+	.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT,
+		 .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 },
+		 .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+		 .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+		 .flags = DRM_MODE_FB_MODIFIERS,
+	}
+},
+};
+
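+/*
+ * Mock .fb_create hook: records that core validation passed far enough
+ * to reach the driver, then returns an error so no framebuffer is
+ * actually allocated.
+ */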
+static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
+					      struct drm_file *file_priv,
+					      const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	int *buffer_created = dev->dev_private;
+	*buffer_created = 1;
+	return ERR_PTR(-EINVAL);
+}
+
+static const struct drm_mode_config_funcs mock_config_funcs = {
+	.fb_create = fb_create_mock,
+};
+
+static struct drm_device mock_drm_device = {
+	.mode_config = {
+		.min_width = MIN_WIDTH,
+		.max_width = MAX_WIDTH,
+		.min_height = MIN_HEIGHT,
+		.max_height = MAX_HEIGHT,
+		.allow_fb_modifiers = true,
+		.funcs = &mock_config_funcs,
+	},
+};
+
+static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r)
+{
+	int buffer_created = 0;
+	struct drm_framebuffer *fb;
+
+	mock_drm_device.dev_private = &buffer_created;
+	fb = drm_internal_framebuffer_create(&mock_drm_device, r, NULL);
+	return buffer_created;
+}
+
+int igt_check_drm_framebuffer_create(void *ignored)
+{
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) {
+		FAIL(createbuffer_tests[i].buffer_created !=
+				execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd),
+		     "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.c b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
new file mode 100644
index 0000000..2a7f937
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common file for modeset selftests.
+ */
+
+#include <linux/module.h>
+
+#include "test-drm_modeset_common.h"
+
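+/*
+ * drm_selftest.c expands the selftest() entries listed in TESTS into
+ * the selftests[] table consumed by run_selftests() below.
+ */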
+#define TESTS "drm_modeset_selftests.h"
+#include "drm_selftest.h"
+
+#include "drm_selftest.c"
+
+static int __init test_drm_modeset_init(void)
+{
+	int err;
+
+	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
+	return err > 0 ? 0 : err;
+}
+
+static void __exit test_drm_modeset_exit(void)
+{
+}
+
+module_init(test_drm_modeset_init);
+module_exit(test_drm_modeset_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
new file mode 100644
index 0000000..8c76f09
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TEST_DRM_MODESET_COMMON_H__
+#define __TEST_DRM_MODESET_COMMON_H__
+
+#define FAIL(test, msg, ...) \
+	do { \
+		if (test) { \
+			pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+			return -EINVAL; \
+		} \
+	} while (0)
+
+#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
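+
+/* e.g. FAIL_ON(!info) logs "<function>/<line>: FAIL_ON(!info)" and returns -EINVAL */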
+
+int igt_check_plane_state(void *ignored);
+int igt_check_drm_format_block_width(void *ignored);
+int igt_check_drm_format_block_height(void *ignored);
+int igt_check_drm_format_min_pitch(void *ignored);
+int igt_check_drm_framebuffer_create(void *ignored);
+int igt_damage_iter_no_damage(void *ignored);
+int igt_damage_iter_no_damage_fractional_src(void *ignored);
+int igt_damage_iter_no_damage_src_moved(void *ignored);
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_no_damage_not_visible(void *ignored);
+int igt_damage_iter_no_damage_no_crtc(void *ignored);
+int igt_damage_iter_no_damage_no_fb(void *ignored);
+int igt_damage_iter_simple_damage(void *ignored);
+int igt_damage_iter_single_damage(void *ignored);
+int igt_damage_iter_single_damage_intersect_src(void *ignored);
+int igt_damage_iter_single_damage_outside_src(void *ignored);
+int igt_damage_iter_single_damage_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_src_moved(void *ignored);
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_damage(void *ignored);
+int igt_damage_iter_damage_one_intersect(void *ignored);
+int igt_damage_iter_damage_one_outside(void *ignored);
+int igt_damage_iter_damage_src_moved(void *ignored);
+int igt_damage_iter_damage_not_visible(void *ignored);
+
+#endif
diff --git a/drivers/gpu/drm/selftests/test-drm-helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
similarity index 90%
rename from drivers/gpu/drm/selftests/test-drm-helper.c
rename to drivers/gpu/drm/selftests/test-drm_plane_helper.c
index a015712..0a9553f 100644
--- a/drivers/gpu/drm/selftests/test-drm-helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -1,27 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Test cases for the drm_kms_helper functions
+ * Test cases for the drm_plane_helper functions
  */
 
-#define pr_fmt(fmt) "drm_kms_helper: " fmt
-
-#include <linux/module.h>
+#define pr_fmt(fmt) "drm_plane_helper: " fmt
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_modes.h>
 
-#define TESTS "drm_helper_selftests.h"
-#include "drm_selftest.h"
-
-#define FAIL(test, msg, ...) \
-	do { \
-		if (test) { \
-			pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
-			return -EINVAL; \
-		} \
-	} while (0)
-
-#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+#include "test-drm_modeset_common.h"
 
 static void set_src(struct drm_plane_state *plane_state,
 		    unsigned src_x, unsigned src_y,
@@ -85,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state,
 	return true;
 }
 
-static int igt_check_plane_state(void *ignored)
+int igt_check_plane_state(void *ignored)
 {
 	int ret;
 
@@ -229,19 +217,3 @@ static int igt_check_plane_state(void *ignored)
 
 	return 0;
 }
-
-#include "drm_selftest.c"
-
-static int __init test_drm_helper_init(void)
-{
-	int err;
-
-	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
-	return err > 0 ? 0 : err;
-}
-
-module_init(test_drm_helper_init);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 6ececad..8554102 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -194,7 +194,7 @@ static int shmob_drm_remove(struct platform_device *pdev)
 	drm_kms_helper_poll_fini(ddev);
 	drm_mode_config_cleanup(ddev);
 	drm_irq_uninstall(ddev);
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 
 	return 0;
 }
@@ -290,7 +290,7 @@ static int shmob_drm_probe(struct platform_device *pdev)
 	drm_kms_helper_poll_fini(ddev);
 	drm_mode_config_cleanup(ddev);
 err_free_drm_dev:
-	drm_dev_unref(ddev);
+	drm_dev_put(ddev);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 5824e6a..ed76e52e 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -40,6 +40,8 @@ static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
 	DRM_DEBUG_DRIVER("\n");
 
 	mixer->status = STI_MIXER_DISABLING;
+
+	drm_crtc_wait_one_vblank(crtc);
 }
 
 static int
@@ -250,10 +252,8 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
 	struct sti_compositor *compo;
 	struct drm_crtc *crtc = data;
 	struct sti_mixer *mixer;
-	struct sti_private *priv;
 	unsigned int pipe;
 
-	priv = crtc->dev->dev_private;
 	pipe = drm_crtc_index(crtc);
 	compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
 	mixer = compo->mixer[pipe];
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 57b870e..bc90845 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,6 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
 {
 	DRM_DEBUG_DRIVER("\n");
 
-	drm_plane_helper_disable(drm_plane, NULL);
 	drm_plane_cleanup(drm_plane);
 }
 
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6dced8a..ac54e0f 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -206,6 +206,8 @@ static void sti_cleanup(struct drm_device *ddev)
 	struct sti_private *private = ddev->dev_private;
 
 	drm_kms_helper_poll_fini(ddev);
+	drm_atomic_helper_shutdown(ddev);
+	drm_mode_config_cleanup(ddev);
 	component_unbind_all(ddev->dev, ddev);
 	kfree(private);
 	ddev->dev_private = NULL;
@@ -230,7 +232,7 @@ static int sti_bind(struct device *dev)
 
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
-		goto err_register;
+		goto err_cleanup;
 
 	drm_mode_config_reset(ddev);
 
@@ -238,8 +240,6 @@ static int sti_bind(struct device *dev)
 
 	return 0;
 
-err_register:
-	drm_mode_config_cleanup(ddev);
 err_cleanup:
 	sti_cleanup(ddev);
 err_drm_dev_put:
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c32de6c..cff7b2b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -517,7 +517,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
 	/* Allocate all the nodes within a single memory page */
 	size = sizeof(struct sti_gdp_node) *
 	    GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-	base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
+	base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
 
 	if (!base) {
 		DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -883,7 +883,6 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
 {
 	DRM_DEBUG_DRIVER("\n");
 
-	drm_plane_helper_disable(drm_plane, NULL);
 	drm_plane_cleanup(drm_plane);
 }
 
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03ac3b4..23565f5 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,6 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
 {
 	DRM_DEBUG_DRIVER("\n");
 
-	drm_plane_helper_disable(drm_plane, NULL);
 	drm_plane_cleanup(drm_plane);
 }
 
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index f2021b2..8dec001 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -26,7 +26,6 @@
 
 static const struct drm_mode_config_funcs drv_mode_config_funcs = {
 	.fb_create = drm_gem_fb_create,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -52,7 +51,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
 static struct drm_driver drv_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.lastclose = drm_fb_helper_lastclose,
 	.name = "stm",
 	.desc = "STMicroelectronics SoC DRM",
 	.date = "20170330",
@@ -72,6 +70,8 @@ static struct drm_driver drv_driver = {
 	.gem_prime_vmap = drm_gem_cma_prime_vmap,
 	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+	.get_scanout_position = ltdc_crtc_scanoutpos,
+	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
 };
 
 static int drv_load(struct drm_device *ddev)
@@ -108,12 +108,6 @@ static int drv_load(struct drm_device *ddev)
 	drm_mode_config_reset(ddev);
 	drm_kms_helper_poll_init(ddev);
 
-	if (ddev->mode_config.num_connector) {
-		ret = drm_fb_cma_fbdev_init(ddev, 16, 0);
-		if (ret)
-			DRM_DEBUG("Warning: fails to create fbdev\n");
-	}
-
 	platform_set_drvdata(pdev, ddev);
 
 	return 0;
@@ -126,7 +120,6 @@ static void drv_unload(struct drm_device *ddev)
 {
 	DRM_DEBUG("%s\n", __func__);
 
-	drm_fb_cma_fbdev_fini(ddev);
 	drm_kms_helper_poll_fini(ddev);
 	ltdc_unload(ddev);
 	drm_mode_config_cleanup(ddev);
@@ -154,6 +147,8 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_put;
 
+	drm_fbdev_generic_setup(ddev, 16);
+
 	return 0;
 
 err_put:
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 808d9fb..61dd661 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -148,6 +148,8 @@
 #define IER_TERRIE	BIT(2)		/* Transfer ERRor Interrupt Enable */
 #define IER_RRIE	BIT(3)		/* Register Reload Interrupt enable */
 
+#define CPSR_CYPOS	GENMASK(15, 0)	/* Current Y position */
+
 #define ISR_LIF		BIT(0)		/* Line Interrupt Flag */
 #define ISR_FUIF	BIT(1)		/* Fifo Underrun Interrupt Flag */
 #define ISR_TERRIF	BIT(2)		/* Transfer ERRor Interrupt Flag */
@@ -626,6 +628,49 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
 	reg_clear(ldev->regs, LTDC_IER, IER_LIE);
 }
 
+bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
+			  bool in_vblank_irq, int *vpos, int *hpos,
+			  ktime_t *stime, ktime_t *etime,
+			  const struct drm_display_mode *mode)
+{
+	struct ltdc_device *ldev = ddev->dev_private;
+	int line, vactive_start, vactive_end, vtotal;
+
+	if (stime)
+		*stime = ktime_get();
+
+	/* The active area starts after vsync + front porch and ends
+	 * at vsync + front porch + display size.
+	 * The total height also includes the back porch.
+	 * We have 3 possible cases to handle:
+	 * - line < vactive_start: vpos = line - vactive_start and will be
+	 * negative
+	 * - vactive_start < line < vactive_end: vpos = line - vactive_start
+	 * and will be positive
+	 * - line > vactive_end: vpos = line - vtotal - vactive_start
+	 * and will be negative
+	 *
+	 * The computation is identical for the first two cases, so we can
+	 * simplify the code and only test whether line > vactive_end
+	 */
+	line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
+	vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
+	vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
+	vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
+
+	if (line > vactive_end)
+		*vpos = line - vtotal - vactive_start;
+	else
+		*vpos = line - vactive_start;
+
+	*hpos = 0;
+
+	if (etime)
+		*etime = ktime_get();
+
+	return true;
+}
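
To make the three cases in the comment above concrete, here is a standalone sketch of the same arithmetic with made-up timings (vactive_start = 36, vactive_end = 516, vtotal = 525; illustrative values, not from real hardware):

#include <stdio.h>

/* Mirrors the vpos computation in ltdc_crtc_scanoutpos() above. */
static int ltdc_vpos(int line, int vactive_start, int vactive_end, int vtotal)
{
	if (line > vactive_end)
		return line - vtotal - vactive_start;
	return line - vactive_start;
}

int main(void)
{
	printf("%d\n", ltdc_vpos(10, 36, 516, 525));	/* -26, before the active area */
	printf("%d\n", ltdc_vpos(100, 36, 516, 525));	/*  64, inside the active area */
	printf("%d\n", ltdc_vpos(520, 36, 516, 525));	/* -41, past the active area */
	return 0;
}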
+
 static const struct drm_crtc_funcs ltdc_crtc_funcs = {
 	.destroy = drm_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index d5afb89..e46f477 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -38,6 +38,11 @@ struct ltdc_device {
 	struct fps_info plane_fpsi[LTDC_MAX_LAYER];
 };
 
+bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			  bool in_vblank_irq, int *vpos, int *hpos,
+			  ktime_t *stime, ktime_t *etime,
+			  const struct drm_display_mode *mode);
+
 int ltdc_load(struct drm_device *ddev);
 void ltdc_unload(struct drm_device *ddev);
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf49c55..9e9255e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -48,8 +48,12 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
 /*
  * These coefficients are taken from the A33 BSP from Allwinner.
  *
- * The formula is for each component, each coefficient being multiplied by
- * 1024 and each constant being multiplied by 16:
+ * The first three values of each row are coded as 13-bit signed fixed-point
+ * numbers, with 10 bits for the fractional part. The fourth value is a
+ * constant coded as a 14-bit signed fixed-point number with 4 bits for the
+ * fractional part.
+ *
+ * The values in table order give the following colorspace translation:
  * G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
  * R = 1.164 * Y + 1.596 * V - 222
  * B = 1.164 * Y + 2.018 * U + 276
@@ -155,6 +159,36 @@ static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
 	return 0;
 }
 
+static const uint32_t sun4i_backend_formats[] = {
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_RGBA4444,
+	DRM_FORMAT_RGBA5551,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+};
+
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+	unsigned int i;
+
+	if (modifier != DRM_FORMAT_MOD_LINEAR)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
+		if (sun4i_backend_formats[i] == fmt)
+			return true;
+
+	return false;
+}
+
 int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
 				     int layer, struct drm_plane *plane)
 {
@@ -395,6 +429,15 @@ int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
 	return 0;
 }
 
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+				 int layer)
+{
+	regmap_update_bits(backend->engine.regs,
+			   SUN4I_BACKEND_ATTCTL_REG0(layer),
+			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
+			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+}
+
 static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
 {
 	u16 src_h = state->src_h >> 16;
@@ -413,11 +456,50 @@ static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
 {
 	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
 	struct sun4i_backend *backend = layer->backend;
+	uint32_t format = state->fb->format->format;
+	uint64_t modifier = state->fb->modifier;
 
 	if (IS_ERR(backend->frontend))
 		return false;
 
-	return sun4i_backend_plane_uses_scaler(state);
+	if (!sun4i_frontend_format_is_supported(format, modifier))
+		return false;
+
+	if (!sun4i_backend_format_is_supported(format, modifier))
+		return true;
+
+	/*
+	 * TODO: The backend alone allows 2x and 4x integer scaling, including
+	 * support for an alpha component (which the frontend doesn't support).
+	 * In that case, use the backend directly instead of the frontend, by
+	 * adding another test here that returns false.
+	 */
+
+	if (sun4i_backend_plane_uses_scaler(state))
+		return true;
+
+	/*
+	 * Here the format is supported by both the frontend and the backend
+	 * and no frontend scaling is required, so use the backend directly.
+	 */
+	return false;
+}
+
+static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
+					     bool *uses_frontend)
+{
+	if (sun4i_backend_plane_uses_frontend(state)) {
+		*uses_frontend = true;
+		return true;
+	}
+
+	*uses_frontend = false;
+
+	/* Scaling is not supported without the frontend. */
+	if (sun4i_backend_plane_uses_scaler(state))
+		return false;
+
+	return true;
 }
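
Concretely, given the two format tables: an XRGB8888 plane that needs scaling takes the frontend path (both engines support the format, and the backend cannot scale on its own), an XRGB8888 plane without scaling stays on the backend, and an ARGB8888 plane can never use the frontend (the format is not in sun4i_frontend_formats), so it is handled by the backend and rejected with -EINVAL in atomic_check below if it also requires scaling.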
 
 static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
@@ -460,14 +542,19 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
 		struct drm_framebuffer *fb = plane_state->fb;
 		struct drm_format_name_buf format_name;
 
-		if (sun4i_backend_plane_uses_frontend(plane_state)) {
+		if (!sun4i_backend_plane_is_supported(plane_state,
+						      &layer_state->uses_frontend))
+			return -EINVAL;
+
+		if (layer_state->uses_frontend) {
 			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
 					 plane->index);
-
-			layer_state->uses_frontend = true;
 			num_frontend_planes++;
 		} else {
-			layer_state->uses_frontend = false;
+			if (fb->format->is_yuv) {
+				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+				num_yuv_planes++;
+			}
 		}
 
 		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
@@ -476,11 +563,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
 		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
 			num_alpha_planes++;
 
-		if (fb->format->is_yuv) {
-			DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
-			num_yuv_planes++;
-		}
-
 		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
 				 plane_state->normalized_zpos);
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index e3d4c60..01f6646 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -198,6 +198,7 @@ engine_to_sun4i_backend(struct sunxi_engine *engine)
 
 void sun4i_backend_layer_enable(struct sun4i_backend *backend,
 				int layer, bool enable);
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier);
 int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
 				     int layer, struct drm_plane *plane);
 int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
@@ -208,5 +209,7 @@ int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
 					int layer, uint32_t in_fmt);
 int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend,
 				    int layer, struct drm_plane *plane);
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+				 int layer);
 
 #endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1e41c3f..9e4c375 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -28,13 +28,22 @@
 #include "sun4i_tcon.h"
 #include "sun8i_tcon_top.h"
 
+static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
+				     struct drm_device *drm,
+				     struct drm_mode_create_dumb *args)
+{
+	/* The hardware only allows even pitches for YUV buffers. */
+	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
+
+	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
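
A quick sanity check of the pitch rule above, as a standalone sketch (DIV_ROUND_UP() and ALIGN() are userspace stand-ins for the kernel macros):

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* 99 pixels at 8 bpp: 99 bytes per line, rounded up to an even 100. */
	assert(ALIGN(DIV_ROUND_UP(99 * 8, 8), 2) == 100);

	/* 640 pixels at 32 bpp: 2560 bytes per line, already even. */
	assert(ALIGN(DIV_ROUND_UP(640 * 32, 8), 2) == 2560);

	return 0;
}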
+
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
 static struct drm_driver sun4i_drv_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 
 	/* Generic Operations */
-	.lastclose		= drm_fb_helper_lastclose,
 	.fops			= &sun4i_drv_fops,
 	.name			= "sun4i-drm",
 	.desc			= "Allwinner sun4i Display Engine",
@@ -43,7 +52,7 @@ static struct drm_driver sun4i_drv_driver = {
 	.minor			= 0,
 
 	/* GEM Operations */
-	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_create		= drm_sun4i_gem_dumb_create,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
 
@@ -105,12 +114,7 @@ static int sun4i_drv_bind(struct device *dev)
 	/* Remove early framebuffers (ie. simplefb) */
 	drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
 
-	/* Create our framebuffer */
-	ret = sun4i_framebuffer_init(drm);
-	if (ret) {
-		dev_err(drm->dev, "Couldn't create our framebuffer\n");
-		goto cleanup_mode_config;
-	}
+	sun4i_framebuffer_init(drm);
 
 	/* Enable connectors polling */
 	drm_kms_helper_poll_init(drm);
@@ -119,11 +123,12 @@ static int sun4i_drv_bind(struct device *dev)
 	if (ret)
 		goto finish_poll;
 
+	drm_fbdev_generic_setup(drm, 32);
+
 	return 0;
 
 finish_poll:
 	drm_kms_helper_poll_fini(drm);
-	sun4i_framebuffer_free(drm);
 cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
 	of_reserved_mem_device_release(dev);
@@ -138,7 +143,6 @@ static void sun4i_drv_unbind(struct device *dev)
 
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
-	sun4i_framebuffer_free(drm);
 	drm_mode_config_cleanup(drm);
 	of_reserved_mem_device_release(dev);
 	drm_dev_put(drm);
@@ -406,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
 	{ .compatible = "allwinner,sun8i-v3s-display-engine" },
 	{ .compatible = "allwinner,sun9i-a80-display-engine" },
 	{ .compatible = "allwinner,sun50i-a64-display-engine" },
+	{ .compatible = "allwinner,sun50i-h6-display-engine" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 5f29850..cb82802 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -12,8 +12,6 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drmP.h>
 
@@ -37,7 +35,6 @@ static int sun4i_de_atomic_check(struct drm_device *dev,
 }
 
 static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
-	.output_poll_changed	= drm_fb_helper_output_poll_changed,
 	.atomic_check		= sun4i_de_atomic_check,
 	.atomic_commit		= drm_atomic_helper_commit,
 	.fb_create		= drm_gem_fb_create,
@@ -47,7 +44,7 @@ static struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
 	.atomic_commit_tail	= drm_atomic_helper_commit_tail_rpm,
 };
 
-int sun4i_framebuffer_init(struct drm_device *drm)
+void sun4i_framebuffer_init(struct drm_device *drm)
 {
 	drm_mode_config_reset(drm);
 
@@ -56,11 +53,4 @@ int sun4i_framebuffer_init(struct drm_device *drm)
 
 	drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
 	drm->mode_config.helper_private = &sun4i_de_mode_config_helpers;
-
-	return drm_fb_cma_fbdev_init(drm, 32, 0);
-}
-
-void sun4i_framebuffer_free(struct drm_device *drm)
-{
-	drm_fb_cma_fbdev_fini(drm);
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
index 7ef0aed..6fe5bd8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -13,7 +13,6 @@
 #ifndef _SUN4I_FRAMEBUFFER_H_
 #define _SUN4I_FRAMEBUFFER_H_
 
-int sun4i_framebuffer_init(struct drm_device *drm);
-void sun4i_framebuffer_free(struct drm_device *drm);
+void sun4i_framebuffer_init(struct drm_device *drm);
 
 #endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ddf6cfa..1a7ebc4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -107,8 +107,34 @@ EXPORT_SYMBOL(sun4i_frontend_update_buffer);
 static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
 {
 	switch (fmt) {
-	case DRM_FORMAT_ARGB8888:
-		*val = 5;
+	case DRM_FORMAT_XRGB8888:
+		*val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sun4i_frontend_drm_format_to_input_mode(uint32_t fmt, u32 *val)
+{
+	if (drm_format_num_planes(fmt) == 1)
+		*val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int sun4i_frontend_drm_format_to_input_sequence(uint32_t fmt, u32 *val)
+{
+	switch (fmt) {
+	case DRM_FORMAT_BGRX8888:
+		*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
+		return 0;
+
+	case DRM_FORMAT_XRGB8888:
+		*val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
 		return 0;
 
 	default:
@@ -119,9 +145,12 @@ static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
 static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
 {
 	switch (fmt) {
+	case DRM_FORMAT_BGRX8888:
+		*val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888;
+		return 0;
+
 	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_ARGB8888:
-		*val = 2;
+		*val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888;
 		return 0;
 
 	default:
@@ -129,22 +158,54 @@ static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
 	}
 }
 
+static const uint32_t sun4i_frontend_formats[] = {
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_XRGB8888,
+};
+
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+	unsigned int i;
+
+	if (modifier != DRM_FORMAT_MOD_LINEAR)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
+		if (sun4i_frontend_formats[i] == fmt)
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL(sun4i_frontend_format_is_supported);
+
 int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
 				  struct drm_plane *plane, uint32_t out_fmt)
 {
 	struct drm_plane_state *state = plane->state;
 	struct drm_framebuffer *fb = state->fb;
+	uint32_t format = fb->format->format;
 	u32 out_fmt_val;
-	u32 in_fmt_val;
+	u32 in_fmt_val, in_mod_val, in_ps_val;
 	int ret;
 
-	ret = sun4i_frontend_drm_format_to_input_fmt(fb->format->format,
-						     &in_fmt_val);
+	ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Invalid input format\n");
 		return ret;
 	}
 
+	ret = sun4i_frontend_drm_format_to_input_mode(format, &in_mod_val);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Invalid input mode\n");
+		return ret;
+	}
+
+	ret = sun4i_frontend_drm_format_to_input_sequence(format, &in_ps_val);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Invalid pixel sequence\n");
+		return ret;
+	}
+
 	ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Invalid output format\n");
@@ -162,10 +223,12 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
 
+	regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
+			   SUN4I_FRONTEND_BYPASS_CSC_EN,
+			   SUN4I_FRONTEND_BYPASS_CSC_EN);
+
 	regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
-		     SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(1) |
-		     SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(in_fmt_val) |
-		     SUN4I_FRONTEND_INPUT_FMT_PS(1));
+		     in_mod_val | in_fmt_val | in_ps_val);
 
 	/*
 	 * TODO: It looks like the A31 and A80 at least will need the
@@ -173,7 +236,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
 	 * ARGB8888).
 	 */
 	regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
-		     SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(out_fmt_val));
+		     out_fmt_val);
 
 	return 0;
 }
@@ -183,16 +246,24 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
 				 struct drm_plane *plane)
 {
 	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = state->fb;
+	uint32_t luma_width, luma_height;
+	uint32_t chroma_width, chroma_height;
 
 	/* Set height and width */
 	DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
 			 state->crtc_w, state->crtc_h);
+
+	luma_width = state->src_w >> 16;
+	luma_height = state->src_h >> 16;
+
+	chroma_width = DIV_ROUND_UP(luma_width, fb->format->hsub);
+	chroma_height = DIV_ROUND_UP(luma_height, fb->format->vsub);
+
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
-		     SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
-					   state->src_w >> 16));
+		     SUN4I_FRONTEND_INSIZE(luma_height, luma_width));
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
-		     SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
-					   state->src_w >> 16));
+		     SUN4I_FRONTEND_INSIZE(chroma_height, chroma_width));
 
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
 		     SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
@@ -200,14 +271,14 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
 		     SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
 
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
-		     state->src_w / state->crtc_w);
+		     (luma_width << 16) / state->crtc_w);
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
-		     state->src_w / state->crtc_w);
+		     (chroma_width << 16) / state->crtc_w);
 
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
-		     state->src_h / state->crtc_h);
+		     (luma_height << 16) / state->crtc_h);
 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
-		     state->src_h / state->crtc_h);
+		     (chroma_height << 16) / state->crtc_h);
 
 	regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
 			  SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
@@ -339,10 +410,6 @@ static int sun4i_frontend_runtime_resume(struct device *dev)
 			   SUN4I_FRONTEND_EN_EN,
 			   SUN4I_FRONTEND_EN_EN);
 
-	regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
-			   SUN4I_FRONTEND_BYPASS_CSC_EN,
-			   SUN4I_FRONTEND_BYPASS_CSC_EN);
-
 	sun4i_frontend_scaler_init(frontend);
 
 	return 0;
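
Looking back at sun4i_frontend_update_coord() above, the luma/chroma split works out as follows for a hypothetical 2x2-subsampled 1920x1080 source displayed 1:1 (illustrative: the RGB formats the frontend accepts at this point have hsub = vsub = 1, so both channels would currently get identical values):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int luma_w = 1920, luma_h = 1080;
	unsigned int crtc_w = 1920, crtc_h = 1080;
	unsigned int chroma_w = DIV_ROUND_UP(luma_w, 2);	/* 960 */
	unsigned int chroma_h = DIV_ROUND_UP(luma_h, 2);	/* 540 */

	/* 16.16 fixed-point factors, as written to HORZFACT/VERTFACT */
	printf("ch0 horz: 0x%x\n", (luma_w << 16) / crtc_w);	/* 0x10000 -> 1.0 */
	printf("ch1 horz: 0x%x\n", (chroma_w << 16) / crtc_w);	/* 0x8000  -> 0.5 */
	printf("ch1 vert: 0x%x\n", (chroma_h << 16) / crtc_h);	/* 0x8000  -> 0.5 */
	return 0;
}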
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 02661ce..ad146e8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -26,12 +26,14 @@
 #define SUN4I_FRONTEND_LINESTRD0_REG		0x040
 
 #define SUN4I_FRONTEND_INPUT_FMT_REG		0x04c
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(mod)		((mod) << 8)
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(fmt)		((fmt) << 4)
-#define SUN4I_FRONTEND_INPUT_FMT_PS(ps)			(ps)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED	(1 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB		(5 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX		0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB		1
 
 #define SUN4I_FRONTEND_OUTPUT_FMT_REG		0x05c
-#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(fmt)		(fmt)
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888	1
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888	2
 
 #define SUN4I_FRONTEND_CH0_INSIZE_REG		0x100
 #define SUN4I_FRONTEND_INSIZE(h, w)			((((h) - 1) << 16) | (((w) - 1)))
@@ -95,5 +97,6 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
 				 struct drm_plane *plane);
 int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
 				  struct drm_plane *plane, uint32_t out_fmt);
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier);
 
 #endif /* _SUN4I_FRONTEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 3ecffa5..fb985ba 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -35,7 +35,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
 {
 	unsigned long best_rate = 0;
 	u8 best_m = 0, m;
-	bool is_double;
+	bool is_double = false;
 
 	for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
 		u8 d;
@@ -52,7 +52,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
 			    (rate - tmp_rate) < (rate - best_rate)) {
 				best_rate = tmp_rate;
 				best_m = m;
-				is_double = d;
+				is_double = (d == 2);
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 78f77af..29631e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -12,6 +12,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drmP.h>
 
 #include "sun4i_backend.h"
@@ -92,14 +93,16 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
 	struct sun4i_backend *backend = layer->backend;
 	struct sun4i_frontend *frontend = backend->frontend;
 
+	sun4i_backend_cleanup_layer(backend, layer->id);
+
 	if (layer_state->uses_frontend) {
 		sun4i_frontend_init(frontend);
 		sun4i_frontend_update_coord(frontend, plane);
 		sun4i_frontend_update_buffer(frontend, plane);
 		sun4i_frontend_update_formats(frontend, plane,
-					      DRM_FORMAT_ARGB8888);
+					      DRM_FORMAT_XRGB8888);
 		sun4i_backend_update_layer_frontend(backend, layer->id,
-						    DRM_FORMAT_ARGB8888);
+						    DRM_FORMAT_XRGB8888);
 		sun4i_frontend_enable(frontend);
 	} else {
 		sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -112,6 +115,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+	.prepare_fb	= drm_gem_fb_prepare_fb,
 	.atomic_disable	= sun4i_backend_layer_atomic_disable,
 	.atomic_update	= sun4i_backend_layer_atomic_update,
 };
@@ -125,10 +129,11 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 };
 
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_layer_formats[] = {
 	DRM_FORMAT_ARGB8888,
 	DRM_FORMAT_ARGB4444,
 	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_BGRX8888,
 	DRM_FORMAT_RGBA5551,
 	DRM_FORMAT_RGBA4444,
 	DRM_FORMAT_RGB888,
@@ -154,8 +159,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
 	/* possible crtcs are set later */
 	ret = drm_universal_plane_init(drm, &layer->plane, 0,
 				       &sun4i_backend_layer_funcs,
-				       sun4i_backend_layer_formats,
-				       ARRAY_SIZE(sun4i_backend_layer_formats),
+				       sun4i_layer_formats,
+				       ARRAY_SIZE(sun4i_layer_formats),
 				       NULL, type, NULL);
 	if (ret) {
 		dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index f949287..0420f5c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -478,8 +478,11 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 }
 
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+				     const struct drm_encoder *encoder,
 				     const struct drm_display_mode *mode)
 {
+	struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
+	struct drm_display_info display_info = connector->display_info;
 	unsigned int bp, hsync, vsync;
 	u8 clk_delay;
 	u32 val = 0;
@@ -491,8 +494,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	sun4i_tcon0_mode_set_common(tcon, mode);
 
 	/* Set dithering if needed */
-	if (tcon->panel)
-		sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+	sun4i_tcon0_mode_set_dithering(tcon, connector);
 
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -541,6 +543,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
+	if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+		val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
 	/*
 	 * On A20 and similar SoCs, the only way to achieve Positive Edge
 	 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
@@ -556,20 +561,16 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	 * Following code is a way to avoid quirks all around TCON
 	 * and DOTCLOCK drivers.
 	 */
-	if (tcon->panel) {
-		struct drm_panel *panel = tcon->panel;
-		struct drm_connector *connector = panel->connector;
-		struct drm_display_info display_info = connector->display_info;
+	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+		clk_set_phase(tcon->dclk, 240);
 
-		if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-			clk_set_phase(tcon->dclk, 240);
-
-		if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-			clk_set_phase(tcon->dclk, 0);
-	}
+	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+		clk_set_phase(tcon->dclk, 0);
 
 	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
-			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+			   SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+			   SUN4I_TCON0_IO_POL_DE_NEGATIVE,
 			   val);
 
 	/* Map output pins to channel 0 */
@@ -684,7 +685,7 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
 		sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
 		break;
 	case DRM_MODE_ENCODER_NONE:
-		sun4i_tcon0_mode_set_rgb(tcon, mode);
+		sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
 		sun4i_tcon_set_mux(tcon, 0, encoder);
 		break;
 	case DRM_MODE_ENCODER_TVDAC:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 3d492c8..b5214d7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -116,6 +116,7 @@
 
 #define SUN4I_TCON0_IO_POL_REG			0x88
 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase)		((phase & 3) << 28)
+#define SUN4I_TCON0_IO_POL_DE_NEGATIVE			BIT(27)
 #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE		BIT(25)
 #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE		BIT(24)
 
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index b14925b..e7608a7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -34,6 +34,41 @@ static const u32 yvu2rgb[] = {
 	0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
 };
 
+/*
+ * DE3 has slightly different CSC units. Factors are in two's complement
+ * format. The first three factors in a row are multiplication factors with
+ * 17 bits for the fractional part. The fourth value in a row is comprised of
+ * two factors: the upper 16 bits represent the difference, which is
+ * subtracted from the input value before the multiplication, and the lower
+ * 16 bits represent a constant, which is added at the end.
+ *
+ * x' = c00 * (x + d0) + c01 * (y + d1) + c02 * (z + d2) + const0
+ * y' = c10 * (x + d0) + c11 * (y + d1) + c12 * (z + d2) + const1
+ * z' = c20 * (x + d0) + c21 * (y + d1) + c22 * (z + d2) + const2
+ *
+ * Please note that the above formula is true only for the Blender CSC. Other
+ * DE3 CSC units take only a positive value for the difference. From what can
+ * be deduced from the BSP driver code, those units probably assume
+ * automatically that the difference has to be subtracted.
+ *
+ * Layout of factors in the table:
+ * c00 c01 c02 [d0 const0]
+ * c10 c11 c12 [d1 const1]
+ * c20 c21 c22 [d2 const2]
+ */
+
+static const u32 yuv2rgb_de3[] = {
+	0x0002542a, 0x00000000, 0x0003312a, 0xffc00000,
+	0x0002542a, 0xffff376b, 0xfffe5fc3, 0xfe000000,
+	0x0002542a, 0x000408d3, 0x00000000, 0xfe000000,
+};
+
+static const u32 yvu2rgb_de3[] = {
+	0x0002542a, 0x0003312a, 0x00000000, 0xffc00000,
+	0x0002542a, 0xfffe5fc3, 0xffff376b, 0xfe000000,
+	0x0002542a, 0x00000000, 0x000408d3, 0xfe000000,
+};
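
As a cross-check, the first row of yuv2rgb_de3 decodes to the familiar BT.601 luma gain once the fixed point is stripped; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t c00 = 0x0002542a;	/* multiplication factor, 17 fractional bits */
	uint32_t dc0 = 0xffc00000;	/* packed [d0 const0] pair */

	printf("c00    ~= %.4f\n", (double)c00 / (1 << 17));	/* ~1.1644 */
	printf("d0      = %d\n", (int16_t)(dc0 >> 16));		/* -64 */
	printf("const0  = %d\n", (int16_t)(dc0 & 0xffff));	/* 0 */
	return 0;
}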
+
 static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
 				       enum sun8i_csc_mode mode)
 {
@@ -61,6 +96,28 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
 	}
 }
 
+static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
+					    enum sun8i_csc_mode mode)
+{
+	const u32 *table;
+	u32 base_reg;
+
+	switch (mode) {
+	case SUN8I_CSC_MODE_YUV2RGB:
+		table = yuv2rgb_de3;
+		break;
+	case SUN8I_CSC_MODE_YVU2RGB:
+		table = yvu2rgb_de3;
+		break;
+	default:
+		DRM_WARN("Wrong CSC mode specified.\n");
+		return;
+	}
+
+	base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
+	regmap_bulk_write(map, base_reg, table, 12);
+}
+
 static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
 {
 	u32 val;
@@ -73,11 +130,32 @@ static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
 	regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
 }
 
+static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
+{
+	u32 val, mask;
+
+	mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
+
+	if (enable)
+		val = mask;
+	else
+		val = 0;
+
+	regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
+			   mask, val);
+}
+
 void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
 				     enum sun8i_csc_mode mode)
 {
 	u32 base;
 
+	if (mixer->cfg->is_de3) {
+		sun8i_de3_ccsc_set_coefficients(mixer->engine.regs,
+						layer, mode);
+		return;
+	}
+
 	base = ccsc_base[mixer->cfg->ccsc][layer];
 
 	sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
@@ -87,6 +165,11 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
 {
 	u32 base;
 
+	if (mixer->cfg->is_de3) {
+		sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
+		return;
+	}
+
 	base = ccsc_base[mixer->cfg->ccsc][layer];
 
 	sun8i_csc_enable(mixer->engine.regs, base, enable);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index ed29837..dc47720 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -5,6 +5,7 @@
 
 #include <linux/component.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #include <drm/drm_of.h>
@@ -20,7 +21,8 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 {
 	struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
 
-	clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+	if (hdmi->quirks->set_rate)
+		clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
 }
 
 static const struct drm_encoder_helper_funcs
@@ -33,8 +35,8 @@ static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
 };
 
 static enum drm_mode_status
-sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
-			 const struct drm_display_mode *mode)
+sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
+			      const struct drm_display_mode *mode)
 {
 	if (mode->clock > 297000)
 		return MODE_CLOCK_HIGH;
@@ -42,6 +44,17 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
+static enum drm_mode_status
+sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
+			    const struct drm_display_mode *mode)
+{
+	/* This is the max for HDMI 2.0b (4K@60Hz) */
+	if (mode->clock > 594000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
 static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
 {
 	return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
@@ -102,6 +115,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 	hdmi->dev = &pdev->dev;
 	encoder = &hdmi->encoder;
 
+	hdmi->quirks = of_device_get_match_data(dev);
+
 	encoder->possible_crtcs =
 		sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
 	/*
@@ -168,10 +183,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 
 	sun8i_hdmi_phy_init(hdmi->phy);
 
-	plat_data->mode_valid = &sun8i_dw_hdmi_mode_valid;
-	plat_data->phy_ops = sun8i_hdmi_phy_get_ops();
-	plat_data->phy_name = "sun8i_dw_hdmi_phy";
-	plat_data->phy_data = hdmi->phy;
+	plat_data->mode_valid = hdmi->quirks->mode_valid;
+	sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
 
 	platform_set_drvdata(pdev, hdmi);
 
@@ -230,8 +243,24 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
+	.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
+	.set_rate = true,
+};
+
+static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
+	.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+};
+
 static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
-	{ .compatible = "allwinner,sun8i-a83t-dw-hdmi" },
+	{
+		.compatible = "allwinner,sun8i-a83t-dw-hdmi",
+		.data = &sun8i_a83t_quirks,
+	},
+	{
+		.compatible = "allwinner,sun50i-h6-dw-hdmi",
+		.data = &sun50i_h6_quirks,
+	},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
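
With this table in place, the of_device_get_match_data() call in sun8i_dw_hdmi_bind() returns &sun8i_a83t_quirks or &sun50i_h6_quirks depending on which compatible matched, so the A83t path keeps calling clk_set_rate() in mode_set while the H6 leaves set_rate unset (false) and picks up the 594 MHz mode_valid limit instead.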
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 7fdc1ec..720c5aa 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -150,6 +150,10 @@ struct sun8i_hdmi_phy;
 struct sun8i_hdmi_phy_variant {
 	bool has_phy_clk;
 	bool has_second_pll;
+	unsigned int is_custom_phy : 1;
+	const struct dw_hdmi_curr_ctrl *cur_ctr;
+	const struct dw_hdmi_mpll_config *mpll_cfg;
+	const struct dw_hdmi_phy_config *phy_cfg;
 	void (*phy_init)(struct sun8i_hdmi_phy *phy);
 	void (*phy_disable)(struct dw_hdmi *hdmi,
 			    struct sun8i_hdmi_phy *phy);
@@ -170,6 +174,12 @@ struct sun8i_hdmi_phy {
 	struct sun8i_hdmi_phy_variant	*variant;
 };
 
+struct sun8i_dw_hdmi_quirks {
+	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+					   const struct drm_display_mode *mode);
+	unsigned int set_rate : 1;
+};
+
 struct sun8i_dw_hdmi {
 	struct clk			*clk_tmds;
 	struct device			*dev;
@@ -178,6 +188,7 @@ struct sun8i_dw_hdmi {
 	struct sun8i_hdmi_phy		*phy;
 	struct dw_hdmi_plat_data	plat_data;
 	struct regulator		*regulator;
+	const struct sun8i_dw_hdmi_quirks *quirks;
 	struct reset_control		*rst_ctrl;
 };
 
@@ -191,7 +202,8 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
 
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+			    struct dw_hdmi_plat_data *plat_data);
 
 int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
 			 bool second_parent);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 4719930..66ea3a9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -14,6 +14,122 @@
  */
 #define I2C_ADDR	0x69
 
+static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
+	{
+		30666000, {
+			{ 0x00b3, 0x0000 },
+			{ 0x2153, 0x0000 },
+			{ 0x40f3, 0x0000 },
+		},
+	},  {
+		36800000, {
+			{ 0x00b3, 0x0000 },
+			{ 0x2153, 0x0000 },
+			{ 0x40a2, 0x0001 },
+		},
+	},  {
+		46000000, {
+			{ 0x00b3, 0x0000 },
+			{ 0x2142, 0x0001 },
+			{ 0x40a2, 0x0001 },
+		},
+	},  {
+		61333000, {
+			{ 0x0072, 0x0001 },
+			{ 0x2142, 0x0001 },
+			{ 0x40a2, 0x0001 },
+		},
+	},  {
+		73600000, {
+			{ 0x0072, 0x0001 },
+			{ 0x2142, 0x0001 },
+			{ 0x4061, 0x0002 },
+		},
+	},  {
+		92000000, {
+			{ 0x0072, 0x0001 },
+			{ 0x2145, 0x0002 },
+			{ 0x4061, 0x0002 },
+		},
+	},  {
+		122666000, {
+			{ 0x0051, 0x0002 },
+			{ 0x2145, 0x0002 },
+			{ 0x4061, 0x0002 },
+		},
+	},  {
+		147200000, {
+			{ 0x0051, 0x0002 },
+			{ 0x2145, 0x0002 },
+			{ 0x4064, 0x0003 },
+		},
+	},  {
+		184000000, {
+			{ 0x0051, 0x0002 },
+			{ 0x214c, 0x0003 },
+			{ 0x4064, 0x0003 },
+		},
+	},  {
+		226666000, {
+			{ 0x0040, 0x0003 },
+			{ 0x214c, 0x0003 },
+			{ 0x4064, 0x0003 },
+		},
+	},  {
+		272000000, {
+			{ 0x0040, 0x0003 },
+			{ 0x214c, 0x0003 },
+			{ 0x5a64, 0x0003 },
+		},
+	},  {
+		340000000, {
+			{ 0x0040, 0x0003 },
+			{ 0x3b4c, 0x0003 },
+			{ 0x5a64, 0x0003 },
+		},
+	},  {
+		594000000, {
+			{ 0x1a40, 0x0003 },
+			{ 0x3b4c, 0x0003 },
+			{ 0x5a64, 0x0003 },
+		},
+	}, {
+		~0UL, {
+			{ 0x0000, 0x0000 },
+			{ 0x0000, 0x0000 },
+			{ 0x0000, 0x0000 },
+		},
+	}
+};
+
+static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
+	/* pixelclk    bpp8    bpp10   bpp12 */
+	{ 25175000,  { 0x0000, 0x0000, 0x0000 }, },
+	{ 27000000,  { 0x0012, 0x0000, 0x0000 }, },
+	{ 59400000,  { 0x0008, 0x0008, 0x0008 }, },
+	{ 72000000,  { 0x0008, 0x0008, 0x001b }, },
+	{ 74250000,  { 0x0013, 0x0013, 0x0013 }, },
+	{ 90000000,  { 0x0008, 0x001a, 0x001b }, },
+	{ 118800000, { 0x001b, 0x001a, 0x001b }, },
+	{ 144000000, { 0x001b, 0x001a, 0x0034 }, },
+	{ 180000000, { 0x001b, 0x0033, 0x0034 }, },
+	{ 216000000, { 0x0036, 0x0033, 0x0034 }, },
+	{ 237600000, { 0x0036, 0x0033, 0x001b }, },
+	{ 288000000, { 0x0036, 0x001b, 0x001b }, },
+	{ 297000000, { 0x0019, 0x001b, 0x0019 }, },
+	{ 330000000, { 0x0036, 0x001b, 0x001b }, },
+	{ 594000000, { 0x003f, 0x001b, 0x001b }, },
+	{ ~0UL,      { 0x0000, 0x0000, 0x0000 }, }
+};
+
+static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
+	/*pixelclk   symbol   term   vlev*/
+	{ 74250000,  0x8009, 0x0004, 0x0232},
+	{ 148500000, 0x8029, 0x0004, 0x0273},
+	{ 594000000, 0x8039, 0x0004, 0x014a},
+	{ ~0UL,	     0x0000, 0x0000, 0x0000}
+};
+
 static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
 				      struct sun8i_hdmi_phy *phy,
 				      unsigned int clk_rate)
@@ -279,8 +395,31 @@ static const struct dw_hdmi_phy_ops sun8i_hdmi_phy_ops = {
 	.setup_hpd = &dw_hdmi_phy_setup_hpd,
 };
 
+static void sun8i_hdmi_phy_unlock(struct sun8i_hdmi_phy *phy)
+{
+	/* enable read access to HDMI controller */
+	regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
+		     SUN8I_HDMI_PHY_READ_EN_MAGIC);
+
+	/* unscramble register offsets */
+	regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
+		     SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
+}
+
+static void sun50i_hdmi_phy_init_h6(struct sun8i_hdmi_phy *phy)
+{
+	regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+			   SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
+			   SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
+
+	regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+			   0xffff0000, 0x80c00000);
+}
+
 static void sun8i_hdmi_phy_init_a83t(struct sun8i_hdmi_phy *phy)
 {
+	sun8i_hdmi_phy_unlock(phy);
+
 	regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
 			   SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK,
 			   SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK);
@@ -298,6 +437,8 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
 {
 	unsigned int val;
 
+	sun8i_hdmi_phy_unlock(phy);
+
 	regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, 0);
 	regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
 			   SUN8I_HDMI_PHY_ANA_CFG1_ENBI,
@@ -372,20 +513,23 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
 
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
 {
-	/* enable read access to HDMI controller */
-	regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
-		     SUN8I_HDMI_PHY_READ_EN_MAGIC);
-
-	/* unscramble register offsets */
-	regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
-		     SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
-
 	phy->variant->phy_init(phy);
 }
 
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void)
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+			    struct dw_hdmi_plat_data *plat_data)
 {
-	return &sun8i_hdmi_phy_ops;
+	struct sun8i_hdmi_phy_variant *variant = phy->variant;
+
+	if (variant->is_custom_phy) {
+		plat_data->phy_ops = &sun8i_hdmi_phy_ops;
+		plat_data->phy_name = "sun8i_dw_hdmi_phy";
+		plat_data->phy_data = phy;
+	} else {
+		plat_data->mpll_cfg = variant->mpll_cfg;
+		plat_data->cur_ctr = variant->cur_ctr;
+		plat_data->phy_config = variant->phy_cfg;
+	}
 }
 
 static struct regmap_config sun8i_hdmi_phy_regmap_config = {
@@ -396,14 +540,8 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
 	.name		= "phy"
 };
 
-static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
-	.has_phy_clk = true,
-	.phy_init = &sun8i_hdmi_phy_init_h3,
-	.phy_disable = &sun8i_hdmi_phy_disable_h3,
-	.phy_config = &sun8i_hdmi_phy_config_h3,
-};
-
 static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
+	.is_custom_phy = true,
 	.phy_init = &sun8i_hdmi_phy_init_a83t,
 	.phy_disable = &sun8i_hdmi_phy_disable_a83t,
 	.phy_config = &sun8i_hdmi_phy_config_a83t,
@@ -411,6 +549,7 @@ static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
 
 static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
 	.has_phy_clk = true,
+	.is_custom_phy = true,
 	.phy_init = &sun8i_hdmi_phy_init_h3,
 	.phy_disable = &sun8i_hdmi_phy_disable_h3,
 	.phy_config = &sun8i_hdmi_phy_config_h3,
@@ -419,17 +558,29 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
 static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
 	.has_phy_clk = true,
 	.has_second_pll = true,
+	.is_custom_phy = true,
 	.phy_init = &sun8i_hdmi_phy_init_h3,
 	.phy_disable = &sun8i_hdmi_phy_disable_h3,
 	.phy_config = &sun8i_hdmi_phy_config_h3,
 };
 
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+	.has_phy_clk = true,
+	.is_custom_phy = true,
+	.phy_init = &sun8i_hdmi_phy_init_h3,
+	.phy_disable = &sun8i_hdmi_phy_disable_h3,
+	.phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
+static const struct sun8i_hdmi_phy_variant sun50i_h6_hdmi_phy = {
+	.cur_ctr  = sun50i_h6_cur_ctr,
+	.mpll_cfg = sun50i_h6_mpll_cfg,
+	.phy_cfg  = sun50i_h6_phy_config,
+	.phy_init = &sun50i_hdmi_phy_init_h6,
+};
+
 static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
 	{
-		.compatible = "allwinner,sun50i-a64-hdmi-phy",
-		.data = &sun50i_a64_hdmi_phy,
-	},
-	{
 		.compatible = "allwinner,sun8i-a83t-hdmi-phy",
 		.data = &sun8i_a83t_hdmi_phy,
 	},
@@ -441,6 +592,14 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
 		.compatible = "allwinner,sun8i-r40-hdmi-phy",
 		.data = &sun8i_r40_hdmi_phy,
 	},
+	{
+		.compatible = "allwinner,sun50i-a64-hdmi-phy",
+		.data = &sun50i_a64_hdmi_phy,
+	},
+	{
+		.compatible = "allwinner,sun50i-h6-hdmi-phy",
+		.data = &sun50i_h6_hdmi_phy,
+	},
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b3d02b..44a9ba7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -368,6 +368,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
 	struct sun8i_mixer *mixer;
 	struct resource *res;
 	void __iomem *regs;
+	unsigned int base;
 	int plane_cnt;
 	int i, ret;
 
@@ -456,33 +457,60 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
 
 	list_add_tail(&mixer->engine.list, &drv->engine_list);
 
-	/* Reset the registers */
-	for (i = 0x0; i < 0x20000; i += 4)
-		regmap_write(mixer->engine.regs, i, 0);
+	base = sun8i_blender_base(mixer);
+
+	/* Reset registers and disable unused sub-engines */
+	if (mixer->cfg->is_de3) {
+		for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
+			regmap_write(mixer->engine.regs, i, 0);
+
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_FCE_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_PEAK_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_LCTI_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_BLS_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_FCC_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_DNS_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_DRC_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
+		regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
+	} else {
+		for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
+			regmap_write(mixer->engine.regs, i, 0);
+
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_FCE_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_BWS_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_LTI_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_PEAK_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_ASE_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_FCC_EN, 0);
+		regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
+	}
 
 	/* Enable the mixer */
 	regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
 		     SUN8I_MIXER_GLOBAL_CTL_RT_EN);
 
 	/* Set background color to black */
-	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR,
+	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
 		     SUN8I_MIXER_BLEND_COLOR_BLACK);
 
 	/*
 	 * Set fill color of bottom plane to black. Generally not needed
 	 * except when VI plane is at bottom (zpos = 0) and enabled.
 	 */
-	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
 		     SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
-	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
+	regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
 		     SUN8I_MIXER_BLEND_COLOR_BLACK);
 
 	plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
 	for (i = 0; i < plane_cnt; i++)
-		regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
+		regmap_write(mixer->engine.regs,
+			     SUN8I_MIXER_BLEND_MODE(base, i),
 			     SUN8I_MIXER_BLEND_MODE_DEF);
 
-	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
 			   SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
 
 	return 0;
@@ -585,6 +613,15 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
 	.vi_num		= 1,
 };
 
+static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
+	.ccsc		= 0,
+	.is_de3		= true,
+	.mod_rate	= 600000000,
+	.scaler_mask	= 0xf,
+	.ui_num		= 3,
+	.vi_num		= 1,
+};
+
 static const struct of_device_id sun8i_mixer_of_table[] = {
 	{
 		.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -618,6 +655,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
 		.compatible = "allwinner,sun50i-a64-de2-mixer-1",
 		.data = &sun50i_a64_mixer1_cfg,
 	},
+	{
+		.compatible = "allwinner,sun50i-h6-de3-mixer-0",
+		.data = &sun50i_h6_mixer0_cfg,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 406c42e..913d14c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -29,24 +29,41 @@
 
 #define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE		BIT(0)
 
-#define SUN8I_MIXER_BLEND_PIPE_CTL		0x1000
-#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(x)	(0x1004 + 0x10 * (x) + 0x0)
-#define SUN8I_MIXER_BLEND_ATTR_INSIZE(x)	(0x1004 + 0x10 * (x) + 0x4)
-#define SUN8I_MIXER_BLEND_ATTR_COORD(x)		(0x1004 + 0x10 * (x) + 0x8)
-#define SUN8I_MIXER_BLEND_ROUTE			0x1080
-#define SUN8I_MIXER_BLEND_PREMULTIPLY		0x1084
-#define SUN8I_MIXER_BLEND_BKCOLOR		0x1088
-#define SUN8I_MIXER_BLEND_OUTSIZE		0x108c
-#define SUN8I_MIXER_BLEND_MODE(x)		(0x1090 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_CTL		0x10b0
-#define SUN8I_MIXER_BLEND_CK_CFG		0x10b4
-#define SUN8I_MIXER_BLEND_CK_MAX(x)		(0x10c0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_MIN(x)		(0x10e0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_OUTCTL		0x10fc
+#define DE2_MIXER_UNIT_SIZE			0x6000
+#define DE3_MIXER_UNIT_SIZE			0x3000
+
+#define DE2_BLD_BASE				0x1000
+#define DE2_CH_BASE				0x2000
+#define DE2_CH_SIZE				0x1000
+
+#define DE3_BLD_BASE				0x0800
+#define DE3_CH_BASE				0x1000
+#define DE3_CH_SIZE				0x0800
+
+#define SUN8I_MIXER_BLEND_PIPE_CTL(base)	((base) + 0)
+#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, x)	((base) + 0x4 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_INSIZE(base, x)	((base) + 0x8 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_COORD(base, x)	((base) + 0xc + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ROUTE(base)		((base) + 0x80)
+#define SUN8I_MIXER_BLEND_PREMULTIPLY(base)	((base) + 0x84)
+#define SUN8I_MIXER_BLEND_BKCOLOR(base)		((base) + 0x88)
+#define SUN8I_MIXER_BLEND_OUTSIZE(base)		((base) + 0x8c)
+#define SUN8I_MIXER_BLEND_MODE(base, x)		((base) + 0x90 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_CTL(base)		((base) + 0xb0)
+#define SUN8I_MIXER_BLEND_CK_CFG(base)		((base) + 0xb4)
+#define SUN8I_MIXER_BLEND_CK_MAX(base, x)	((base) + 0xc0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_MIN(base, x)	((base) + 0xe0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_OUTCTL(base)		((base) + 0xfc)
+#define SUN50I_MIXER_BLEND_CSC_CTL(base)	((base) + 0x100)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
+	((base) + 0x110 + (layer) * 0x30 +  (x) * 0x10 + 4 * (y))
+#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
+	((base) + 0x110 + (layer) * 0x30 +  (i) * 0x10 + 0x0c)
 
 #define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK	GENMASK(12, 8)
 #define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe)	BIT(8 + pipe)
 #define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe)	BIT(pipe)
+
 /* colors are always in AARRGGBB format */
 #define SUN8I_MIXER_BLEND_COLOR_BLACK		0xff000000
 /* The following numbers are some still unknown magic numbers */
@@ -57,6 +74,9 @@
 
 #define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED	BIT(1)
 
+#define SUN50I_MIXER_BLEND_CSC_CTL_EN(ch)	BIT(ch)
+#define SUN50I_MIXER_BLEND_CSC_CONST_VAL(d, c)	(((d) << 16) | ((c) & 0xffff))
+
 #define SUN8I_MIXER_FBFMT_ARGB8888	0
 #define SUN8I_MIXER_FBFMT_ABGR8888	1
 #define SUN8I_MIXER_FBFMT_RGBA8888	2
@@ -95,8 +115,8 @@
 #define SUN8I_MIXER_FBFMT_YUV411	14
 
 /*
- * These sub-engines are still unknown now, the EN registers are here only to
- * be used to disable these sub-engines.
+ * Sub-engines listed below are unused for now. The EN registers are here only
+ * to be used to disable these sub-engines.
  */
 #define SUN8I_MIXER_FCE_EN			0xa0000
 #define SUN8I_MIXER_BWS_EN			0xa2000
@@ -106,6 +126,17 @@
 #define SUN8I_MIXER_FCC_EN			0xaa000
 #define SUN8I_MIXER_DCSC_EN			0xb0000
 
+#define SUN50I_MIXER_FCE_EN			0x70000
+#define SUN50I_MIXER_PEAK_EN			0x70800
+#define SUN50I_MIXER_LCTI_EN			0x71000
+#define SUN50I_MIXER_BLS_EN			0x71800
+#define SUN50I_MIXER_FCC_EN			0x72000
+#define SUN50I_MIXER_DNS_EN			0x80000
+#define SUN50I_MIXER_DRC_EN			0xa0000
+#define SUN50I_MIXER_FMT_EN			0xa8000
+#define SUN50I_MIXER_CDC0_EN			0xd0000
+#define SUN50I_MIXER_CDC1_EN			0xd8000
+
 struct de2_fmt_info {
 	u32			drm_fmt;
 	u32			de2_fmt;
@@ -127,6 +158,7 @@ struct de2_fmt_info {
  *	are invalid.
  * @mod_rate: module clock rate that needs to be set in order to have
  *	a functional block.
+ * @is_de3: true if this is the next-gen Display Engine 3.0, false otherwise.
  */
 struct sun8i_mixer_cfg {
 	int		vi_num;
@@ -134,6 +166,7 @@ struct sun8i_mixer_cfg {
 	int		scaler_mask;
 	int		ccsc;
 	unsigned long	mod_rate;
+	unsigned int	is_de3 : 1;
 };
 
 struct sun8i_mixer {
@@ -153,5 +186,20 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
 	return container_of(engine, struct sun8i_mixer, engine);
 }
 
+static inline u32
+sun8i_blender_base(struct sun8i_mixer *mixer)
+{
+	return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline u32
+sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
+{
+	if (mixer->cfg->is_de3)
+		return DE3_CH_BASE + channel * DE3_CH_SIZE;
+	else
+		return DE2_CH_BASE + channel * DE2_CH_SIZE;
+}
+
 const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
 #endif /* _SUN8I_MIXER_H_ */
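
The blend register macros above are now parameterized by a base offset, so one set of definitions serves both register layouts. A minimal standalone sketch (plain C, not kernel code; the bases and the 0x8c offset are copied from the header above) of how the two bases resolve:

#include <stdio.h>

#define DE2_BLD_BASE	0x1000
#define DE3_BLD_BASE	0x0800
#define SUN8I_MIXER_BLEND_OUTSIZE(base)	((base) + 0x8c)

int main(void)
{
	/* DE2 keeps the historical absolute address 0x108c */
	printf("DE2 OUTSIZE: 0x%04x\n", SUN8I_MIXER_BLEND_OUTSIZE(DE2_BLD_BASE));
	/* DE3 moves the whole blender block down to 0x0800 */
	printf("DE3 OUTSIZE: 0x%04x\n", SUN8I_MIXER_BLEND_OUTSIZE(DE3_BLD_BASE));
	return 0;
}
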
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 3040a79..fc36e0c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -9,11 +9,17 @@
 #include <linux/component.h>
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 
 #include "sun8i_tcon_top.h"
 
+struct sun8i_tcon_top_quirks {
+	bool has_tcon_tv1;
+	bool has_dsi;
+};
+
 static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
 {
 	return !!of_match_node(sun8i_tcon_top_of_table, node);
@@ -121,10 +127,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 	struct platform_device *pdev = to_platform_device(dev);
 	struct clk_hw_onecell_data *clk_data;
 	struct sun8i_tcon_top *tcon_top;
+	const struct sun8i_tcon_top_quirks *quirks;
 	struct resource *res;
 	void __iomem *regs;
 	int ret, i;
 
+	quirks = of_device_get_match_data(&pdev->dev);
+
 	tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
 	if (!tcon_top)
 		return -ENOMEM;
@@ -168,6 +177,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 	}
 
 	/*
+	 * At least on H6, some registers have bits set by default which
+	 * may cause issues. Clear them here.
+	 */
+	writel(0, regs + TCON_TOP_PORT_SEL_REG);
+	writel(0, regs + TCON_TOP_GATE_SRC_REG);
+
+	/*
 	 * TCON TOP has two muxes, which select parent clock for each TCON TV
 	 * channel clock. Parent could be either TCON TV or TVE clock. For now
 	 * we leave this fixed to TCON TV, since TVE driver for R40 is not yet
@@ -180,15 +196,17 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 					     &tcon_top->reg_lock,
 					     TCON_TOP_TCON_TV0_GATE, 0);
 
-	clk_data->hws[CLK_TCON_TOP_TV1] =
-		sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
-					     &tcon_top->reg_lock,
-					     TCON_TOP_TCON_TV1_GATE, 1);
+	if (quirks->has_tcon_tv1)
+		clk_data->hws[CLK_TCON_TOP_TV1] =
+			sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+						     &tcon_top->reg_lock,
+						     TCON_TOP_TCON_TV1_GATE, 1);
 
-	clk_data->hws[CLK_TCON_TOP_DSI] =
-		sun8i_tcon_top_register_gate(dev, "dsi", regs,
-					     &tcon_top->reg_lock,
-					     TCON_TOP_TCON_DSI_GATE, 2);
+	if (quirks->has_dsi)
+		clk_data->hws[CLK_TCON_TOP_DSI] =
+			sun8i_tcon_top_register_gate(dev, "dsi", regs,
+						     &tcon_top->reg_lock,
+						     TCON_TOP_TCON_DSI_GATE, 2);
 
 	for (i = 0; i < CLK_NUM; i++)
 		if (IS_ERR(clk_data->hws[i])) {
@@ -250,9 +268,25 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
 	return 0;
 }
 
+const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+	.has_tcon_tv1	= true,
+	.has_dsi	= true,
+};
+
+const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+	/* Nothing special */
+};
+
 /* sun4i_drv uses this list to check if a device node is a TCON TOP */
 const struct of_device_id sun8i_tcon_top_of_table[] = {
-	{ .compatible = "allwinner,sun8i-r40-tcon-top" },
+	{
+		.compatible = "allwinner,sun8i-r40-tcon-top",
+		.data = &sun8i_r40_tcon_top_quirks
+	},
+	{
+		.compatible = "allwinner,sun50i-h6-tcon-top",
+		.data = &sun50i_h6_tcon_top_quirks
+	},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
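
The quirk structure keeps SoC differences out of the bind logic: each compatible entry carries a pointer to static quirk data, and the code branches on capability flags rather than on the compatible string. A minimal standalone sketch of the same lookup pattern (helper names here are made up; the kernel side uses of_device_get_match_data()):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct quirks {
	bool has_tcon_tv1;
	bool has_dsi;
};

static const struct quirks r40_quirks = { .has_tcon_tv1 = true, .has_dsi = true };
static const struct quirks h6_quirks;	/* nothing special */

static const struct {
	const char *compatible;
	const struct quirks *data;
} table[] = {
	{ "allwinner,sun8i-r40-tcon-top", &r40_quirks },
	{ "allwinner,sun50i-h6-tcon-top", &h6_quirks },
};

static const struct quirks *match_quirks(const char *compatible)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].compatible, compatible))
			return table[i].data;
	return NULL;
}

int main(void)
{
	const struct quirks *q = match_quirks("allwinner,sun50i-h6-tcon-top");

	/* H6 registers neither the TV1 nor the DSI clock gate */
	printf("tv1: %d, dsi: %d\n", q->has_tcon_tv1, q->has_dsi);
	return 0;
}
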
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 28c15c6..1853426 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drmP.h>
 
@@ -30,7 +31,10 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
 				  int overlay, bool enable, unsigned int zpos,
 				  unsigned int old_zpos)
 {
-	u32 val;
+	u32 val, bld_base, ch_base;
+
+	bld_base = sun8i_blender_base(mixer);
+	ch_base = sun8i_channel_base(mixer, channel);
 
 	DRM_DEBUG_DRIVER("%sabling channel %d overlay %d\n",
 			 enable ? "En" : "Dis", channel, overlay);
@@ -41,17 +45,17 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
 		val = 0;
 
 	regmap_update_bits(mixer->engine.regs,
-			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
 			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
 
 	if (!enable || zpos != old_zpos) {
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_PIPE_CTL,
+				   SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
 				   SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
 				   0);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_ROUTE,
+				   SUN8I_MIXER_BLEND_ROUTE(bld_base),
 				   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
 				   0);
 	}
@@ -60,12 +64,13 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
 		val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+				   SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+				   val, val);
 
 		val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_ROUTE,
+				   SUN8I_MIXER_BLEND_ROUTE(bld_base),
 				   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
 				   val);
 	}
@@ -77,12 +82,16 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 {
 	struct drm_plane_state *state = plane->state;
 	u32 src_w, src_h, dst_w, dst_h;
+	u32 bld_base, ch_base;
 	u32 outsize, insize;
 	u32 hphase, vphase;
 
 	DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
 			 channel, overlay);
 
+	bld_base = sun8i_blender_base(mixer);
+	ch_base = sun8i_channel_base(mixer, channel);
+
 	src_w = drm_rect_width(&state->src) >> 16;
 	src_h = drm_rect_height(&state->src) >> 16;
 	dst_w = drm_rect_width(&state->dst);
@@ -103,8 +112,8 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 		regmap_write(mixer->engine.regs,
 			     SUN8I_MIXER_GLOBAL_SIZE,
 			     outsize);
-		regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTSIZE,
-			     outsize);
+		regmap_write(mixer->engine.regs,
+			     SUN8I_MIXER_BLEND_OUTSIZE(bld_base), outsize);
 
 		if (state->crtc)
 			interlaced = state->crtc->state->adjusted_mode.flags
@@ -116,7 +125,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 			val = 0;
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_OUTCTL,
+				   SUN8I_MIXER_BLEND_OUTCTL(bld_base),
 				   SUN8I_MIXER_BLEND_OUTCTL_INTERLACED,
 				   val);
 
@@ -129,10 +138,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 			 state->src.x1 >> 16, state->src.y1 >> 16);
 	DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_UI_LAYER_SIZE(channel, overlay),
+		     SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay),
 		     insize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_UI_OVL_SIZE(channel),
+		     SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base),
 		     insize);
 
 	if (insize != outsize || hphase || vphase) {
@@ -156,10 +165,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 			 state->dst.x1, state->dst.y1);
 	DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+		     SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
 		     SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+		     SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
 		     outsize);
 
 	return 0;
@@ -170,7 +179,9 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
 {
 	struct drm_plane_state *state = plane->state;
 	const struct de2_fmt_info *fmt_info;
-	u32 val;
+	u32 val, ch_base;
+
+	ch_base = sun8i_channel_base(mixer, channel);
 
 	fmt_info = sun8i_mixer_format_info(state->fb->format->format);
 	if (!fmt_info || !fmt_info->rgb) {
@@ -180,7 +191,7 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
 
 	val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
 	regmap_update_bits(mixer->engine.regs,
-			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
 			   SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
 
 	return 0;
@@ -193,8 +204,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_gem_cma_object *gem;
 	dma_addr_t paddr;
+	u32 ch_base;
 	int bpp;
 
+	ch_base = sun8i_channel_base(mixer, channel);
+
 	/* Get the physical address of the buffer in memory */
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
 
@@ -211,13 +225,13 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 	/* Set the line width */
 	DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_UI_LAYER_PITCH(channel, overlay),
+		     SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
 		     fb->pitches[0]);
 
 	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
 
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(channel, overlay),
+		     SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
 		     lower_32_bits(paddr));
 
 	return 0;
@@ -287,6 +301,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
 }
 
 static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+	.prepare_fb	= drm_gem_fb_prepare_fb,
 	.atomic_check	= sun8i_ui_layer_atomic_check,
 	.atomic_disable	= sun8i_ui_layer_atomic_disable,
 	.atomic_update	= sun8i_ui_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
index 123b15e..f4389cf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -18,23 +18,26 @@
 
 #include <drm/drm_plane.h>
 
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0xc)
-#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x10)
-#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x14)
-#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(ch, layer) \
-			(0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x18)
-#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(ch)	(0x2000 + 0x1000 * (ch) + 0x80)
-#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(ch)	(0x2000 + 0x1000 * (ch) + 0x84)
-#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch)	(0x2000 + 0x1000 * (ch) + 0x88)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(base, layer) \
+			((base) + 0x20 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(base, layer) \
+			((base) + 0x20 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(base, layer) \
+			((base) + 0x20 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(base, layer) \
+			((base) + 0x20 * (layer) + 0xc)
+#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(base, layer) \
+			((base) + 0x20 * (layer) + 0x10)
+#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(base, layer) \
+			((base) + 0x20 * (layer) + 0x14)
+#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(base, layer) \
+			((base) + 0x20 * (layer) + 0x18)
+#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(base) \
+			((base) + 0x80)
+#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(base) \
+			((base) + 0x84)
+#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(base) \
+			((base) + 0x88)
 
 #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN		BIT(0)
 #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK	GENMASK(2, 1)
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index 6bb2aa1..ae0806b 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -10,6 +10,7 @@
  */
 
 #include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
 
 static const u32 lan2coefftab16[240] = {
 	0x00004000, 0x00033ffe, 0x00063efc, 0x000a3bfb,
@@ -88,6 +89,20 @@ static const u32 lan2coefftab16[240] = {
 	0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
 };
 
+static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+	int vi_num = mixer->cfg->vi_num;
+
+	if (mixer->cfg->is_de3)
+		return DE3_VI_SCALER_UNIT_BASE +
+		       DE3_VI_SCALER_UNIT_SIZE * vi_num +
+		       DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+	else
+		return DE2_VI_SCALER_UNIT_BASE +
+		       DE2_VI_SCALER_UNIT_SIZE * vi_num +
+		       DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+}
+
 static int sun8i_ui_scaler_coef_index(unsigned int step)
 {
 	unsigned int scale, int_part, float_part;
@@ -114,33 +129,35 @@ static int sun8i_ui_scaler_coef_index(unsigned int step)
 
 void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
 {
-	int vi_cnt = mixer->cfg->vi_num;
-	u32 val;
+	u32 val, base;
 
-	if (WARN_ON(layer < vi_cnt))
+	if (WARN_ON(layer < mixer->cfg->vi_num))
 		return;
 
+	base = sun8i_ui_scaler_base(mixer, layer);
+
 	if (enable)
 		val = SUN8I_SCALER_GSU_CTRL_EN |
 		      SUN8I_SCALER_GSU_CTRL_COEFF_RDY;
 	else
 		val = 0;
 
-	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_CTRL(vi_cnt, layer - vi_cnt), val);
+	regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val);
 }
 
 void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
 			   u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
 			   u32 hscale, u32 vscale, u32 hphase, u32 vphase)
 {
-	int vi_cnt = mixer->cfg->vi_num;
 	u32 insize, outsize;
 	int i, offset;
+	u32 base;
 
-	if (WARN_ON(layer < vi_cnt))
+	if (WARN_ON(layer < mixer->cfg->vi_num))
 		return;
 
+	base = sun8i_ui_scaler_base(mixer, layer);
+
 	hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
 	vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
 	hscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
@@ -149,24 +166,22 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
 	insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
 	outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
 
-	layer -= vi_cnt;
-
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, layer), outsize);
+		     SUN8I_SCALER_GSU_OUTSIZE(base), outsize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_INSIZE(vi_cnt, layer), insize);
+		     SUN8I_SCALER_GSU_INSIZE(base), insize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_HSTEP(vi_cnt, layer), hscale);
+		     SUN8I_SCALER_GSU_HSTEP(base), hscale);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_VSTEP(vi_cnt, layer), vscale);
+		     SUN8I_SCALER_GSU_VSTEP(base), vscale);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_HPHASE(vi_cnt, layer), hphase);
+		     SUN8I_SCALER_GSU_HPHASE(base), hphase);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_GSU_VPHASE(vi_cnt, layer), vphase);
+		     SUN8I_SCALER_GSU_VPHASE(base), vphase);
 	offset = sun8i_ui_scaler_coef_index(hscale) *
 			SUN8I_UI_SCALER_COEFF_COUNT;
 	for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
 		regmap_write(mixer->engine.regs,
-			     SUN8I_SCALER_GSU_HCOEFF(vi_cnt, layer, i),
+			     SUN8I_SCALER_GSU_HCOEFF(base, i),
 			     lan2coefftab16[offset + i]);
 }
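
sun8i_ui_scaler_base() folds what the old SUN8I_SCALER_GSU_* macros computed into one place: UI scaler units sit after the VI units, with different unit sizes on DE2 and DE3. A standalone check of the arithmetic (bases and unit sizes copied from the scaler headers; the example takes the first UI channel on a mixer with one VI channel):

#include <stdbool.h>
#include <stdio.h>

static unsigned int ui_scaler_base(bool de3, int vi_num, int channel)
{
	if (de3)
		return 0x20000 + 0x08000 * vi_num + 0x08000 * (channel - vi_num);

	return 0x20000 + 0x20000 * vi_num + 0x10000 * (channel - vi_num);
}

int main(void)
{
	/* first UI channel (channel 1) on a mixer with one VI channel */
	printf("DE2: 0x%x\n", ui_scaler_base(false, 1, 1));	/* 0x40000 */
	printf("DE3: 0x%x\n", ui_scaler_base(true, 1, 1));	/* 0x28000 */
	return 0;
}
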
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
index 86295be..1ef4bd6 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
@@ -11,6 +11,9 @@
 
 #include "sun8i_mixer.h"
 
+#define DE2_UI_SCALER_UNIT_SIZE 0x10000
+#define DE3_UI_SCALER_UNIT_SIZE 0x08000
+
 /* these two macros assume 16 fractional bits, which is standard in DRM */
 #define SUN8I_UI_SCALER_SCALE_MIN		1
 #define SUN8I_UI_SCALER_SCALE_MAX		((1UL << 20) - 1)
@@ -20,23 +23,14 @@
 #define SUN8I_UI_SCALER_COEFF_COUNT		16
 #define SUN8I_UI_SCALER_SIZE(w, h)		(((h) - 1) << 16 | ((w) - 1))
 
-#define SUN8I_SCALER_GSU_CTRL(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x0)
-#define SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x40)
-#define SUN8I_SCALER_GSU_INSIZE(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x80)
-#define SUN8I_SCALER_GSU_HSTEP(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x88)
-#define SUN8I_SCALER_GSU_VSTEP(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x8c)
-#define SUN8I_SCALER_GSU_HPHASE(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x90)
-#define SUN8I_SCALER_GSU_VPHASE(vi_cnt, ui_idx) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x98)
-#define SUN8I_SCALER_GSU_HCOEFF(vi_cnt, ui_idx, index) \
-	(0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x200 + \
-	0x4 * (index))
+#define SUN8I_SCALER_GSU_CTRL(base)		((base) + 0x0)
+#define SUN8I_SCALER_GSU_OUTSIZE(base)		((base) + 0x40)
+#define SUN8I_SCALER_GSU_INSIZE(base)		((base) + 0x80)
+#define SUN8I_SCALER_GSU_HSTEP(base)		((base) + 0x88)
+#define SUN8I_SCALER_GSU_VSTEP(base)		((base) + 0x8c)
+#define SUN8I_SCALER_GSU_HPHASE(base)		((base) + 0x90)
+#define SUN8I_SCALER_GSU_VPHASE(base)		((base) + 0x98)
+#define SUN8I_SCALER_GSU_HCOEFF(base, index)	((base) + 0x200 + 0x4 * (index))
 
 #define SUN8I_SCALER_GSU_CTRL_EN		BIT(0)
 #define SUN8I_SCALER_GSU_CTRL_COEFF_RDY		BIT(4)
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index f4fe978..87be898 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -13,6 +13,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drmP.h>
 
@@ -24,7 +25,10 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
 				  int overlay, bool enable, unsigned int zpos,
 				  unsigned int old_zpos)
 {
-	u32 val;
+	u32 val, bld_base, ch_base;
+
+	bld_base = sun8i_blender_base(mixer);
+	ch_base = sun8i_channel_base(mixer, channel);
 
 	DRM_DEBUG_DRIVER("%sabling VI channel %d overlay %d\n",
 			 enable ? "En" : "Dis", channel, overlay);
@@ -35,17 +39,17 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
 		val = 0;
 
 	regmap_update_bits(mixer->engine.regs,
-			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
 			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
 
 	if (!enable || zpos != old_zpos) {
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_PIPE_CTL,
+				   SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
 				   SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
 				   0);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_ROUTE,
+				   SUN8I_MIXER_BLEND_ROUTE(bld_base),
 				   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
 				   0);
 	}
@@ -54,12 +58,13 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
 		val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+				   SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+				   val, val);
 
 		val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
 
 		regmap_update_bits(mixer->engine.regs,
-				   SUN8I_MIXER_BLEND_ROUTE,
+				   SUN8I_MIXER_BLEND_ROUTE(bld_base),
 				   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
 				   val);
 	}
@@ -72,6 +77,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 	struct drm_plane_state *state = plane->state;
 	const struct drm_format_info *format = state->fb->format;
 	u32 src_w, src_h, dst_w, dst_h;
+	u32 bld_base, ch_base;
 	u32 outsize, insize;
 	u32 hphase, vphase;
 	bool subsampled;
@@ -79,6 +85,9 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 	DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
 			 channel, overlay);
 
+	bld_base = sun8i_blender_base(mixer);
+	ch_base = sun8i_channel_base(mixer, channel);
+
 	src_w = drm_rect_width(&state->src) >> 16;
 	src_h = drm_rect_height(&state->src) >> 16;
 	dst_w = drm_rect_width(&state->dst);
@@ -115,10 +124,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 			 (state->src.y1 >> 16) & ~(format->vsub - 1));
 	DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_VI_LAYER_SIZE(channel, overlay),
+		     SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, overlay),
 		     insize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_CHAN_VI_OVL_SIZE(channel),
+		     SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base),
 		     insize);
 
 	/*
@@ -149,10 +158,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
 			 state->dst.x1, state->dst.y1);
 	DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+		     SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
 		     SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
 	regmap_write(mixer->engine.regs,
-		     SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+		     SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
 		     outsize);
 
 	return 0;
@@ -163,7 +172,9 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
 {
 	struct drm_plane_state *state = plane->state;
 	const struct de2_fmt_info *fmt_info;
-	u32 val;
+	u32 val, ch_base;
+
+	ch_base = sun8i_channel_base(mixer, channel);
 
 	fmt_info = sun8i_mixer_format_info(state->fb->format->format);
 	if (!fmt_info) {
@@ -173,7 +184,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
 
 	val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
 	regmap_update_bits(mixer->engine.regs,
-			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
 			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
 
 	if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
@@ -189,9 +200,17 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
 		val = 0;
 
 	regmap_update_bits(mixer->engine.regs,
-			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
 			   SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
 
+	/* It seems that YUV formats use the global alpha setting. */
+	if (mixer->cfg->is_de3)
+		regmap_update_bits(mixer->engine.regs,
+				   SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
+								  overlay),
+				   SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK,
+				   SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(0xff));
+
 	return 0;
 }
 
@@ -204,8 +223,11 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 	struct drm_gem_cma_object *gem;
 	u32 dx, dy, src_x, src_y;
 	dma_addr_t paddr;
+	u32 ch_base;
 	int i;
 
+	ch_base = sun8i_channel_base(mixer, channel);
+
 	/* Adjust x and y to be dividable by subsampling factor */
 	src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
 	src_y = (state->src.y1 >> 16) & ~(format->vsub - 1);
@@ -235,17 +257,17 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 		DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
 				 i + 1, fb->pitches[i]);
 		regmap_write(mixer->engine.regs,
-			     SUN8I_MIXER_CHAN_VI_LAYER_PITCH(channel,
+			     SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base,
 							     overlay, i),
-	       fb->pitches[i]);
+			     fb->pitches[i]);
 
 		DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
 				 i + 1, &paddr);
 
 		regmap_write(mixer->engine.regs,
-			     SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(channel,
+			     SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
 								 overlay, i),
-	       lower_32_bits(paddr));
+			     lower_32_bits(paddr));
 	}
 
 	return 0;
@@ -315,6 +337,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
 }
 
 static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+	.prepare_fb	= drm_gem_fb_prepare_fb,
 	.atomic_check	= sun8i_vi_layer_atomic_check,
 	.atomic_disable	= sun8i_vi_layer_atomic_disable,
 	.atomic_update	= sun8i_vi_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 6996627..8a5e6d0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -12,23 +12,26 @@
 
 #include <drm/drm_plane.h>
 
-#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch, layer) \
-		(0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch, layer) \
-		(0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(ch, layer) \
-		(0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch, layer, plane) \
-		(0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0xc + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch, layer, plane) \
-		(0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x18 + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch)	(0x2000 + 0x1000 * (ch) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(base, layer) \
+		((base) + 0x30 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(base, layer) \
+		((base) + 0x30 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(base, layer) \
+		((base) + 0x30 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(base, layer, plane) \
+		((base) + 0x30 * (layer) + 0xc + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(base, layer, plane) \
+		((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
+		((base) + 0xe8)
 
 #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN		BIT(0)
 /* RGB mode should be set for RGB formats and cleared for YCbCr */
 #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE		BIT(15)
 #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET	8
 #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK	GENMASK(12, 8)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK	GENMASK(31, 24)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x)	((x) << 24)
 
 struct sun8i_mixer;
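
The new DE3 global-alpha field above is applied with regmap_update_bits(), i.e. a read-modify-write under the field mask. A standalone check of what that composition does to the attribute word (the sample attribute value is made up):

#include <stdio.h>
#include <stdint.h>

#define ALPHA_MASK	0xff000000u		/* GENMASK(31, 24) */
#define ALPHA(x)	((uint32_t)(x) << 24)

int main(void)
{
	uint32_t attr = 0x00008001;	/* made-up value: EN and RGB_MODE set */

	/* what regmap_update_bits(regs, reg, ALPHA_MASK, ALPHA(0xff)) does */
	attr = (attr & ~ALPHA_MASK) | (ALPHA(0xff) & ALPHA_MASK);

	printf("attr = 0x%08x\n", (unsigned int)attr);	/* 0xff008001 */
	return 0;
}
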
 
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index d3f1acb..7ba7501 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -833,6 +833,16 @@ static const u32 bicubic4coefftab32[480] = {
 	0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
 };
 
+static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+	if (mixer->cfg->is_de3)
+		return DE3_VI_SCALER_UNIT_BASE +
+		       DE3_VI_SCALER_UNIT_SIZE * channel;
+	else
+		return DE2_VI_SCALER_UNIT_BASE +
+		       DE2_VI_SCALER_UNIT_SIZE * channel;
+}
+
 static int sun8i_vi_scaler_coef_index(unsigned int step)
 {
 	unsigned int scale, int_part, float_part;
@@ -857,7 +867,7 @@ static int sun8i_vi_scaler_coef_index(unsigned int step)
 	}
 }
 
-static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
+static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
 				      u32 hstep, u32 vstep,
 				      const struct drm_format_info *format)
 {
@@ -877,29 +887,31 @@ static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
 	offset = sun8i_vi_scaler_coef_index(hstep) *
 			SUN8I_VI_SCALER_COEFF_COUNT;
 	for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
-		regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i),
 			     lan3coefftab32_left[offset + i]);
-		regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i),
 			     lan3coefftab32_right[offset + i]);
-		regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i),
 			     ch_left[offset + i]);
-		regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i),
 			     ch_right[offset + i]);
 	}
 
 	offset = sun8i_vi_scaler_coef_index(hstep) *
 			SUN8I_VI_SCALER_COEFF_COUNT;
 	for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
-		regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i),
 			     lan2coefftab32[offset + i]);
-		regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(layer, i),
+		regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i),
 			     cy[offset + i]);
 	}
 }
 
 void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
 {
-	u32 val;
+	u32 val, base;
+
+	base = sun8i_vi_scaler_base(mixer, layer);
 
 	if (enable)
 		val = SUN8I_SCALER_VSU_CTRL_EN |
@@ -907,7 +919,8 @@ void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
 	else
 		val = 0;
 
-	regmap_write(mixer->engine.regs, SUN8I_SCALER_VSU_CTRL(layer), val);
+	regmap_write(mixer->engine.regs,
+		     SUN8I_SCALER_VSU_CTRL(base), val);
 }
 
 void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
@@ -917,6 +930,9 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
 {
 	u32 chphase, cvphase;
 	u32 insize, outsize;
+	u32 base;
+
+	base = sun8i_vi_scaler_base(mixer, layer);
 
 	hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
 	vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
@@ -940,32 +956,44 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
 		cvphase = vphase;
 	}
 
+	if (mixer->cfg->is_de3) {
+		u32 val;
+
+		if (format->hsub == 1 && format->vsub == 1)
+			val = SUN50I_SCALER_VSU_SCALE_MODE_UI;
+		else
+			val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
+
+		regmap_write(mixer->engine.regs,
+			     SUN50I_SCALER_VSU_SCALE_MODE(base), val);
+	}
+
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_OUTSIZE(layer), outsize);
+		     SUN8I_SCALER_VSU_OUTSIZE(base), outsize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_YINSIZE(layer), insize);
+		     SUN8I_SCALER_VSU_YINSIZE(base), insize);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_YHSTEP(layer), hscale);
+		     SUN8I_SCALER_VSU_YHSTEP(base), hscale);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_YVSTEP(layer), vscale);
+		     SUN8I_SCALER_VSU_YVSTEP(base), vscale);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_YHPHASE(layer), hphase);
+		     SUN8I_SCALER_VSU_YHPHASE(base), hphase);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_YVPHASE(layer), vphase);
+		     SUN8I_SCALER_VSU_YVPHASE(base), vphase);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_CINSIZE(layer),
+		     SUN8I_SCALER_VSU_CINSIZE(base),
 		     SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
 					  src_h / format->vsub));
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_CHSTEP(layer),
+		     SUN8I_SCALER_VSU_CHSTEP(base),
 		     hscale / format->hsub);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_CVSTEP(layer),
+		     SUN8I_SCALER_VSU_CVSTEP(base),
 		     vscale / format->vsub);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_CHPHASE(layer), chphase);
+		     SUN8I_SCALER_VSU_CHPHASE(base), chphase);
 	regmap_write(mixer->engine.regs,
-		     SUN8I_SCALER_VSU_CVPHASE(layer), cvphase);
-	sun8i_vi_scaler_set_coeff(mixer->engine.regs, layer,
+		     SUN8I_SCALER_VSU_CVPHASE(base), cvphase);
+	sun8i_vi_scaler_set_coeff(mixer->engine.regs, base,
 				  hscale, vscale, format);
 }
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
index a595ab6..68f6593 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
@@ -12,6 +12,12 @@
 #include <drm/drm_fourcc.h>
 #include "sun8i_mixer.h"
 
+#define DE2_VI_SCALER_UNIT_BASE 0x20000
+#define DE2_VI_SCALER_UNIT_SIZE 0x20000
+
+#define DE3_VI_SCALER_UNIT_BASE 0x20000
+#define DE3_VI_SCALER_UNIT_SIZE 0x08000
+
 /* these two macros assume 16 fractional bits, which is standard in DRM */
 #define SUN8I_VI_SCALER_SCALE_MIN		1
 #define SUN8I_VI_SCALER_SCALE_MAX		((1UL << 20) - 1)
@@ -21,34 +27,48 @@
 #define SUN8I_VI_SCALER_COEFF_COUNT		32
 #define SUN8I_VI_SCALER_SIZE(w, h)		(((h) - 1) << 16 | ((w) - 1))
 
-#define SUN8I_SCALER_VSU_CTRL(ch)	(0x20000 + 0x20000 * (ch) + 0x0)
-#define SUN8I_SCALER_VSU_OUTSIZE(ch)	(0x20000 + 0x20000 * (ch) + 0x40)
-#define SUN8I_SCALER_VSU_YINSIZE(ch)	(0x20000 + 0x20000 * (ch) + 0x80)
-#define SUN8I_SCALER_VSU_YHSTEP(ch)	(0x20000 + 0x20000 * (ch) + 0x88)
-#define SUN8I_SCALER_VSU_YVSTEP(ch)	(0x20000 + 0x20000 * (ch) + 0x8c)
-#define SUN8I_SCALER_VSU_YHPHASE(ch)	(0x20000 + 0x20000 * (ch) + 0x90)
-#define SUN8I_SCALER_VSU_YVPHASE(ch)	(0x20000 + 0x20000 * (ch) + 0x98)
-#define SUN8I_SCALER_VSU_CINSIZE(ch)	(0x20000 + 0x20000 * (ch) + 0xc0)
-#define SUN8I_SCALER_VSU_CHSTEP(ch)	(0x20000 + 0x20000 * (ch) + 0xc8)
-#define SUN8I_SCALER_VSU_CVSTEP(ch)	(0x20000 + 0x20000 * (ch) + 0xcc)
-#define SUN8I_SCALER_VSU_CHPHASE(ch)	(0x20000 + 0x20000 * (ch) + 0xd0)
-#define SUN8I_SCALER_VSU_CVPHASE(ch)	(0x20000 + 0x20000 * (ch) + 0xd8)
-#define SUN8I_SCALER_VSU_YHCOEFF0(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x200 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YHCOEFF1(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x300 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YVCOEFF(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x400 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF0(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x600 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF1(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x700 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CVCOEFF(ch, i) \
-	(0x20000 + 0x20000 * (ch) + 0x800 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CTRL(base)		((base) + 0x0)
+#define SUN50I_SCALER_VSU_SCALE_MODE(base)	((base) + 0x10)
+#define SUN50I_SCALER_VSU_DIR_THR(base)		((base) + 0x20)
+#define SUN50I_SCALER_VSU_EDGE_THR(base)	((base) + 0x24)
+#define SUN50I_SCALER_VSU_EDSCL_CTRL(base)	((base) + 0x28)
+#define SUN50I_SCALER_VSU_ANGLE_THR(base)	((base) + 0x2c)
+#define SUN8I_SCALER_VSU_OUTSIZE(base)		((base) + 0x40)
+#define SUN8I_SCALER_VSU_YINSIZE(base)		((base) + 0x80)
+#define SUN8I_SCALER_VSU_YHSTEP(base)		((base) + 0x88)
+#define SUN8I_SCALER_VSU_YVSTEP(base)		((base) + 0x8c)
+#define SUN8I_SCALER_VSU_YHPHASE(base)		((base) + 0x90)
+#define SUN8I_SCALER_VSU_YVPHASE(base)		((base) + 0x98)
+#define SUN8I_SCALER_VSU_CINSIZE(base)		((base) + 0xc0)
+#define SUN8I_SCALER_VSU_CHSTEP(base)		((base) + 0xc8)
+#define SUN8I_SCALER_VSU_CVSTEP(base)		((base) + 0xcc)
+#define SUN8I_SCALER_VSU_CHPHASE(base)		((base) + 0xd0)
+#define SUN8I_SCALER_VSU_CVPHASE(base)		((base) + 0xd8)
+#define SUN8I_SCALER_VSU_YHCOEFF0(base, i)	((base) + 0x200 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YHCOEFF1(base, i)	((base) + 0x300 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YVCOEFF(base, i)	((base) + 0x400 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF0(base, i)	((base) + 0x600 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF1(base, i)	((base) + 0x700 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CVCOEFF(base, i)	((base) + 0x800 + 0x4 * (i))
 
 #define SUN8I_SCALER_VSU_CTRL_EN		BIT(0)
 #define SUN8I_SCALER_VSU_CTRL_COEFF_RDY		BIT(4)
 
+#define SUN50I_SCALER_VSU_SUB_ZERO_DIR_THR(x)	(((x) & 0xFF) << 24)
+#define SUN50I_SCALER_VSU_ZERO_DIR_THR(x)	(((x) & 0xFF) << 16)
+#define SUN50I_SCALER_VSU_HORZ_DIR_THR(x)	(((x) & 0xFF) << 8)
+#define SUN50I_SCALER_VSU_VERT_DIR_THR(x)	((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_SCALE_MODE_UI		0
+#define SUN50I_SCALER_VSU_SCALE_MODE_NORMAL	1
+#define SUN50I_SCALER_VSU_SCALE_MODE_ED_SCALE	2
+
+#define SUN50I_SCALER_VSU_EDGE_SHIFT(x)		(((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_EDGE_OFFSET(x)	((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_ANGLE_SHIFT(x)	(((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_ANGLE_OFFSET(x)	((x) & 0xFF)
+
 void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
 void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
 			   u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f80e82e..607a6ea1 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1978,6 +1978,23 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+	unsigned int i;
+
+	if (!dc->soc->wgrps)
+		return true;
+
+	for (i = 0; i < dc->soc->num_wgrps; i++) {
+		const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+		if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+			return true;
+	}
+
+	return false;
+}
+
 static int tegra_dc_init(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1993,22 +2010,8 @@ static int tegra_dc_init(struct host1x_client *client)
 	 * assign a primary plane to them, which in turn will cause KMS to
 	 * crash.
 	 */
-	if (dc->soc->wgrps) {
-		bool has_wgrps = false;
-		unsigned int i;
-
-		for (i = 0; i < dc->soc->num_wgrps; i++) {
-			const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
-
-			if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
-				has_wgrps = true;
-				break;
-			}
-		}
-
-		if (!has_wgrps)
-			return 0;
-	}
+	if (!tegra_dc_has_window_groups(dc))
+		return 0;
 
 	dc->syncpt = host1x_syncpt_request(client, flags);
 	if (!dc->syncpt)
@@ -2094,6 +2097,9 @@ static int tegra_dc_exit(struct host1x_client *client)
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
 
+	if (!tegra_dc_has_window_groups(dc))
+		return 0;
+
 	devm_free_irq(dc->dev, dc->irq, dc);
 
 	err = tegra_dc_rgb_exit(dc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 65ea498..4b70ce6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1274,6 +1274,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra194-display", },
 	{ .compatible = "nvidia,tegra194-dc", },
 	{ .compatible = "nvidia,tegra194-sor", },
+	{ .compatible = "nvidia,tegra194-vic", },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f685e729..352d05f 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -141,9 +141,9 @@ int falcon_load_firmware(struct falcon *falcon)
 	/* allocate iova space for the firmware */
 	falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
 						    &falcon->firmware.paddr);
-	if (!falcon->firmware.vaddr) {
-		dev_err(falcon->dev, "dma memory mapping failed\n");
-		return -ENOMEM;
+	if (IS_ERR(falcon->firmware.vaddr)) {
+		dev_err(falcon->dev, "DMA memory mapping failed\n");
+		return PTR_ERR(falcon->firmware.vaddr);
 	}
 
 	/* copy firmware image into local area. this also ensures endianness */
@@ -197,11 +197,19 @@ void falcon_exit(struct falcon *falcon)
 int falcon_boot(struct falcon *falcon)
 {
 	unsigned long offset;
+	u32 value;
 	int err;
 
 	if (!falcon->firmware.vaddr)
 		return -EINVAL;
 
+	err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+				 (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+					   FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+				 10, 10000);
+	if (err < 0)
+		return err;
+
 	falcon_writel(falcon, 0, FALCON_DMACTL);
 
 	/* setup the address of the binary data so Falcon can access it later */
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 6112d90..922a48d 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -742,7 +742,9 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
 
 static int tegra_display_hub_probe(struct platform_device *pdev)
 {
+	struct device_node *child = NULL;
 	struct tegra_display_hub *hub;
+	struct clk *clk;
 	unsigned int i;
 	int err;
 
@@ -801,6 +803,34 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
 			return err;
 	}
 
+	hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+				      GFP_KERNEL);
+	if (!hub->clk_heads)
+		return -ENOMEM;
+
+	for (i = 0; i < hub->num_heads; i++) {
+		child = of_get_next_child(pdev->dev.of_node, child);
+		if (!child) {
+			dev_err(&pdev->dev, "failed to find node for head %u\n",
+				i);
+			return -ENODEV;
+		}
+
+		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "failed to get clock for head %u\n",
+				i);
+			of_node_put(child);
+			return PTR_ERR(clk);
+		}
+
+		hub->clk_heads[i] = clk;
+	}
+
+	of_node_put(child);
+
 	/* XXX: enable clock across reset? */
 	err = reset_control_assert(hub->rst);
 	if (err < 0)
@@ -840,12 +870,16 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
 static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
 {
 	struct tegra_display_hub *hub = dev_get_drvdata(dev);
+	unsigned int i = hub->num_heads;
 	int err;
 
 	err = reset_control_assert(hub->rst);
 	if (err < 0)
 		return err;
 
+	while (i--)
+		clk_disable_unprepare(hub->clk_heads[i]);
+
 	clk_disable_unprepare(hub->clk_hub);
 	clk_disable_unprepare(hub->clk_dsc);
 	clk_disable_unprepare(hub->clk_disp);
@@ -856,6 +890,7 @@ static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
 static int __maybe_unused tegra_display_hub_resume(struct device *dev)
 {
 	struct tegra_display_hub *hub = dev_get_drvdata(dev);
+	unsigned int i;
 	int err;
 
 	err = clk_prepare_enable(hub->clk_disp);
@@ -870,13 +905,22 @@ static int __maybe_unused tegra_display_hub_resume(struct device *dev)
 	if (err < 0)
 		goto disable_dsc;
 
+	for (i = 0; i < hub->num_heads; i++) {
+		err = clk_prepare_enable(hub->clk_heads[i]);
+		if (err < 0)
+			goto disable_heads;
+	}
+
 	err = reset_control_deassert(hub->rst);
 	if (err < 0)
-		goto disable_hub;
+		goto disable_heads;
 
 	return 0;
 
-disable_hub:
+disable_heads:
+	while (i--)
+		clk_disable_unprepare(hub->clk_heads[i]);
+
 	clk_disable_unprepare(hub->clk_hub);
 disable_dsc:
 	clk_disable_unprepare(hub->clk_dsc);
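
The suspend/resume changes follow the usual partial-unwind idiom: enable the per-head clocks in order and, on failure, disable only the ones already enabled, newest first, which is what the `while (i--)` loop does. A standalone sketch of the idiom (the failure injection is artificial):

#include <stdio.h>

#define NUM_HEADS 3

static int enable_head(unsigned int i)
{
	if (i == 2)	/* inject a failure on the third head */
		return -1;

	printf("enabled head %u\n", i);
	return 0;
}

static void disable_head(unsigned int i)
{
	printf("disabled head %u\n", i);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_HEADS; i++)
		if (enable_head(i) < 0)
			goto disable_heads;

	return 0;

disable_heads:
	/* i is the head that failed; unwind the earlier ones in reverse */
	while (i--)
		disable_head(i);

	return 1;
}
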
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 6696a85..479087c 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -49,6 +49,9 @@ struct tegra_display_hub {
 	struct clk *clk_hub;
 	struct reset_control *rst;
 
+	unsigned int num_heads;
+	struct clk **clk_heads;
+
 	const struct tegra_display_hub_soc *soc;
 	struct tegra_windowgroup *wgrps;
 };
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b129da2..ef8692b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -19,6 +19,8 @@
 
 #include <soc/tegra/pmc.h>
 
+#include <sound/hda_verbs.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
@@ -29,14 +31,6 @@
 #include "sor.h"
 #include "trace.h"
 
-/*
- * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
- * merged. Having this around after the commit is merged should be safe since
- * the preprocessor will effectively replace all occurrences and therefore no
- * duplicate will be defined.
- */
-#define TEGRA_IO_PAD_HDMI_DP0 26
-
 #define SOR_REKEY 0x38
 
 struct tegra_sor_hdmi_settings {
@@ -407,6 +401,7 @@ struct tegra_sor {
 	const struct tegra_sor_soc *soc;
 	void __iomem *regs;
 	unsigned int index;
+	unsigned int irq;
 
 	struct reset_control *rst;
 	struct clk *clk_parent;
@@ -433,6 +428,11 @@ struct tegra_sor {
 
 	struct delayed_work scdc;
 	bool scdc_enabled;
+
+	struct {
+		unsigned int sample_rate;
+		unsigned int channels;
+	} audio;
 };
 
 struct tegra_sor_state {
@@ -2139,6 +2139,144 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
 	return 0;
 }
 
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+	size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+	for (i = 0; i < length; i++)
+		tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+				 SOR_AUDIO_HDA_ELD_BUFWR);
+
+	/*
+	 * The HDA codec will always report an ELD buffer size of 96 bytes and
+	 * the HDA codec driver will check that each byte read from the buffer
+	 * is valid. Therefore every byte must be written, even if fewer than
+	 * 96 bytes were parsed from the EDID.
+	 */
+	for (i = length; i < 96; i++)
+		tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+	u32 value;
+
+	tegra_sor_write_eld(sor);
+
+	value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+	tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+	tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+	u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+	struct hdmi_audio_infoframe frame;
+	u32 value;
+	int err;
+
+	err = hdmi_audio_infoframe_init(&frame);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+		return err;
+	}
+
+	frame.channels = sor->audio.channels;
+
+	err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+		return err;
+	}
+
+	tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+	value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+	value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+	value |= INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+	return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+	u32 value;
+
+	value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+	/* select HDA audio input */
+	value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+	value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+	/* inject null samples */
+	if (sor->audio.channels != 2)
+		value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+	else
+		value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+	value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+	tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+	/* enable advertising HBR capability */
+	tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+
+	tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+	value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+		SOR_HDMI_SPARE_CTS_RESET(1) |
+		SOR_HDMI_SPARE_HW_CTS_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+	/* enable HW CTS */
+	value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+	tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+	/* allow packet to be sent */
+	value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+	/* reset N counter and enable lookup */
+	value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+	value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+	tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+	tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+	tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+	tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+	tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+	tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+	tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+	tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+	value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+	tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+	tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+	value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+	tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+	tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+	value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+	tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+	tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+	value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+	value &= ~SOR_HDMI_AUDIO_N_RESET;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+	tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
 static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
 {
 	u32 value;
@@ -2148,6 +2286,11 @@ static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
 	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
 }
 
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+	tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
 static struct tegra_sor_hdmi_settings *
 tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
 {
@@ -2243,6 +2386,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
 	u32 value;
 	int err;
 
+	tegra_sor_audio_unprepare(sor);
 	tegra_sor_hdmi_scdc_stop(sor);
 
 	err = tegra_sor_detach(sor);
@@ -2651,6 +2795,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
 		dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
 
 	tegra_sor_hdmi_scdc_start(sor);
+	tegra_sor_audio_prepare(sor);
 }
 
 static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2666,6 +2811,7 @@ static int tegra_sor_init(struct host1x_client *client)
 	struct tegra_sor *sor = host1x_client_to_sor(client);
 	int connector = DRM_MODE_CONNECTOR_Unknown;
 	int encoder = DRM_MODE_ENCODER_NONE;
+	u32 value;
 	int err;
 
 	if (!sor->aux) {
@@ -2759,6 +2905,15 @@ static int tegra_sor_init(struct host1x_client *client)
 	if (err < 0)
 		return err;
 
+	/*
+	 * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+	 * is used for interoperability between the HDA codec driver and the
+	 * HDMI/DP driver.
+	 */
+	value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+	tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+	tegra_sor_writel(sor, value, SOR_INT_MASK);
+
 	return 0;
 }
 
@@ -2767,6 +2922,9 @@ static int tegra_sor_exit(struct host1x_client *client)
 	struct tegra_sor *sor = host1x_client_to_sor(client);
 	int err;
 
+	tegra_sor_writel(sor, 0, SOR_INT_MASK);
+	tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+
 	tegra_output_exit(&sor->output);
 
 	if (sor->aux) {
@@ -3037,6 +3195,54 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
 	return 0;
 }
 
+static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
+				   unsigned int *channels)
+{
+	unsigned int mul, div;
+
+	if (format & AC_FMT_BASE_44K)
+		*rate = 44100;
+	else
+		*rate = 48000;
+
+	mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+	div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+	*rate = *rate * (mul + 1) / (div + 1);
+
+	*channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+	struct tegra_sor *sor = data;
+	u32 value;
+
+	value = tegra_sor_readl(sor, SOR_INT_STATUS);
+	tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
+	if (value & SOR_INT_CODEC_SCRATCH0) {
+		value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+		if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+			unsigned int format, sample_rate, channels;
+
+			format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+			tegra_hda_parse_format(format, &sample_rate, &channels);
+
+			sor->audio.sample_rate = sample_rate;
+			sor->audio.channels = channels;
+
+			tegra_sor_hdmi_audio_enable(sor);
+		} else {
+			tegra_sor_hdmi_audio_disable(sor);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int tegra_sor_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
@@ -3119,14 +3325,38 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		goto remove;
 	}
 
-	if (!pdev->dev.pm_domain) {
-		sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-		if (IS_ERR(sor->rst)) {
-			err = PTR_ERR(sor->rst);
+	err = platform_get_irq(pdev, 0);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+		goto remove;
+	}
+
+	sor->irq = err;
+
+	err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+			       dev_name(sor->dev), sor);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		goto remove;
+	}
+
+	sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+	if (IS_ERR(sor->rst)) {
+		err = PTR_ERR(sor->rst);
+
+		if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
 			dev_err(&pdev->dev, "failed to get reset control: %d\n",
 				err);
 			goto remove;
 		}
+
+		/*
+		 * At this point, the reset control is most likely being used
+		 * by the generic power domain implementation. With any luck
+		 * the power domain will have taken care of resetting the SOR
+		 * and we don't have to do anything.
+		 */
+		sor->rst = NULL;
 	}
 
 	sor->clk = devm_clk_get(&pdev->dev, NULL);
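
The SCRATCH0 interrupt carries the HDA stream format word programmed by the codec driver; tegra_hda_parse_format() decodes it, and tegra_sor_hdmi_audio_enable() then derives the AVAL values from the resulting sample rate. A standalone check of both computations (the AC_FMT_* masks mirror include/sound/hda_verbs.h; the sample format word is made up):

#include <stdio.h>

#define AC_FMT_BASE_44K		(1 << 14)
#define AC_FMT_MULT_MASK	(7 << 11)
#define AC_FMT_MULT_SHIFT	11
#define AC_FMT_DIV_MASK		(7 << 8)
#define AC_FMT_DIV_SHIFT	8

int main(void)
{
	unsigned int format = 0x0011;	/* 48 kHz base, no mult/div, 16-bit */
	unsigned int rate, mul, div;

	rate = (format & AC_FMT_BASE_44K) ? 44100 : 48000;
	mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
	div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
	rate = rate * (mul + 1) / (div + 1);

	/* 48 kHz uses N = 6144, so AVAL works out to 24000 */
	printf("rate %u Hz, AVAL %u\n", rate,
	       (24000 * 6144) / (128 * rate / 1000));
	return 0;
}
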
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index fb0854d..13f7e68 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -364,12 +364,28 @@
 #define  INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
 #define  INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
 
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define  SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define  SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
 #define SOR_HDMI_CTRL 0xc0
 #define  SOR_HDMI_CTRL_ENABLE (1 << 30)
 #define  SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
 #define  SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
 #define  SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
 
+#define SOR_HDMI_SPARE 0xcb
+#define  SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define  SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define  SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
 #define SOR_REFCLK 0xe6
 #define  SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
 #define  SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
@@ -378,10 +394,62 @@
 #define  SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
 #define  SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
 
+#define SOR_AUDIO_CNTRL 0xfc
+#define  SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define  SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define   SOURCE_SELECT_MASK 0x3
+#define   SOURCE_SELECT_HDA 0x2
+#define   SOURCE_SELECT_SPDIF 0x1
+#define   SOURCE_SELECT_AUTO 0x0
+#define  SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define  SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define  SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define  SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define  SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define  SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define  SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define  SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define  SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
 #define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
 #define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
 #define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
 
+#define SOR_HDMI_AUDIO_N 0x13c
+#define  SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define  SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
 #define SOR_HDMI2_CTRL 0x13e
 #define  SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
 #define  SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 9f657a6..d47983d 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -38,6 +38,7 @@ struct vic {
 	struct iommu_domain *domain;
 	struct device *dev;
 	struct clk *clk;
+	struct reset_control *rst;
 
 	/* Platform configuration */
 	const struct vic_config *config;
@@ -56,13 +57,37 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
 static int vic_runtime_resume(struct device *dev)
 {
 	struct vic *vic = dev_get_drvdata(dev);
+	int err;
 
-	return clk_prepare_enable(vic->clk);
+	err = clk_prepare_enable(vic->clk);
+	if (err < 0)
+		return err;
+
+	usleep_range(10, 20);
+
+	err = reset_control_deassert(vic->rst);
+	if (err < 0)
+		goto disable;
+
+	usleep_range(10, 20);
+
+	return 0;
+
+disable:
+	clk_disable_unprepare(vic->clk);
+	return err;
 }
 
 static int vic_runtime_suspend(struct device *dev)
 {
 	struct vic *vic = dev_get_drvdata(dev);
+	int err;
+
+	err = reset_control_assert(vic->rst);
+	if (err < 0)
+		return err;
+
+	usleep_range(2000, 4000);
 
 	clk_disable_unprepare(vic->clk);
 
@@ -282,10 +307,18 @@ static const struct vic_config vic_t186_config = {
 	.version = 0x18,
 };
 
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+	.version = 0x19,
+};
+
 static const struct of_device_id vic_match[] = {
 	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
 	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
 	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
 	{ },
 };
 
@@ -323,6 +356,14 @@ static int vic_probe(struct platform_device *pdev)
 		return PTR_ERR(vic->clk);
 	}
 
+	if (!dev->pm_domain) {
+		vic->rst = devm_reset_control_get(dev, "vic");
+		if (IS_ERR(vic->rst)) {
+			dev_err(&pdev->dev, "failed to get reset\n");
+			return PTR_ERR(vic->rst);
+		}
+	}
+
 	vic->falcon.dev = dev;
 	vic->falcon.regs = vic->regs;
 	vic->falcon.ops = &vic_falcon_ops;
@@ -418,3 +459,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
 MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
 #endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 33e5332..3dac08b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -140,7 +140,6 @@ static int tilcdc_commit(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = tilcdc_fb_create,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = tilcdc_atomic_check,
 	.atomic_commit = tilcdc_commit,
 };
@@ -191,9 +190,6 @@ static void tilcdc_fini(struct drm_device *dev)
 		drm_dev_unregister(dev);
 
 	drm_kms_helper_poll_fini(dev);
-
-	drm_fb_cma_fbdev_fini(dev);
-
 	drm_irq_uninstall(dev);
 	drm_mode_config_cleanup(dev);
 	tilcdc_remove_external_device(dev);
@@ -396,16 +392,14 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
 
 	drm_mode_config_reset(ddev);
 
-	ret = drm_fb_cma_fbdev_init(ddev, bpp, 0);
-	if (ret)
-		goto init_failed;
-
 	drm_kms_helper_poll_init(ddev);
 
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
 		goto init_failed;
 
+	drm_fbdev_generic_setup(ddev, bpp);
+
 	priv->is_registered = true;
 	return 0;
 
@@ -519,7 +513,6 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
 static struct drm_driver tilcdc_driver = {
 	.driver_features    = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
 			       DRIVER_PRIME | DRIVER_ATOMIC),
-	.lastclose          = drm_fb_helper_lastclose,
 	.irq_handler        = tilcdc_irq,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_print_info     = drm_gem_cma_print_info,
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 16f4b5c9..2c408ac 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -10,6 +10,17 @@
 config TINYDRM_MIPI_DBI
 	tristate
 
+config TINYDRM_HX8357D
+	tristate "DRM support for HX8357D display panels"
+	depends on DRM_TINYDRM && SPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	select TINYDRM_MIPI_DBI
+	help
+	  DRM driver for the following HX8357D panels:
+	  * YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
+
+	  If M is selected the module will be called hx8357d.
+
 config TINYDRM_ILI9225
 	tristate "DRM support for ILI9225 display panels"
 	depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 14d9908..f823066 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -4,6 +4,7 @@
 obj-$(CONFIG_TINYDRM_MIPI_DBI)		+= mipi-dbi.o
 
 # Displays
+obj-$(CONFIG_TINYDRM_HX8357D)		+= hx8357d.o
 obj-$(CONFIG_TINYDRM_ILI9225)		+= ili9225.o
 obj-$(CONFIG_TINYDRM_ILI9341)		+= ili9341.o
 obj-$(CONFIG_TINYDRM_MI0283QT)		+= mi0283qt.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 255341e..01a6f2d 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -36,77 +36,6 @@
  * and registers the DRM device using devm_tinydrm_register().
  */
 
-/**
- * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
- *     another driver's scatter/gather table of pinned pages
- * @drm: DRM device to import into
- * @attach: DMA-BUF attachment
- * @sgt: Scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table exported via DMA-BUF by
- * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
- * kernel virtual address on the CMA object. Drivers should use this as their
- * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
- * address. tinydrm_gem_cma_free_object() should be used in combination with
- * this function.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
-				      struct dma_buf_attachment *attach,
-				      struct sg_table *sgt)
-{
-	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *obj;
-	void *vaddr;
-
-	vaddr = dma_buf_vmap(attach->dmabuf);
-	if (!vaddr) {
-		DRM_ERROR("Failed to vmap PRIME buffer\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
-	if (IS_ERR(obj)) {
-		dma_buf_vunmap(attach->dmabuf, vaddr);
-		return obj;
-	}
-
-	cma_obj = to_drm_gem_cma_obj(obj);
-	cma_obj->vaddr = vaddr;
-
-	return obj;
-}
-EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
-
-/**
- * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
- *                               object
- * @gem_obj: GEM object to free
- *
- * This function frees the backing memory of the CMA GEM object, cleans up the
- * GEM object state and frees the memory used to store the object itself using
- * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
- * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object_unlocked callback.
- */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
-{
-	if (gem_obj->import_attach) {
-		struct drm_gem_cma_object *cma_obj;
-
-		cma_obj = to_drm_gem_cma_obj(gem_obj);
-		dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
-		cma_obj->vaddr = NULL;
-	}
-
-	drm_gem_cma_free_object(gem_obj);
-}
-EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
-
 static struct drm_framebuffer *
 tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
 		  const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -146,6 +75,7 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
 	drm->dev_private = tdev;
 	drm_mode_config_init(drm);
 	drm->mode_config.funcs = &tinydrm_mode_config_funcs;
+	drm->mode_config.allow_fb_modifiers = true;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index dcd3901..bf6bfbc 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -9,12 +9,18 @@
 
 #include <linux/backlight.h>
 #include <linux/dma-buf.h>
+#include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/spi/spi.h>
 #include <linux/swab.h>
 
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
 #include <drm/tinydrm/tinydrm.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
 
 static unsigned int spi_max;
 module_param(spi_max, uint, 0400);
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 7e8e24d..eacfc0e 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -184,6 +184,10 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
 	struct drm_display_mode mode_copy;
 	struct drm_connector *connector;
 	int ret;
+	static const uint64_t modifiers[] = {
+		DRM_FORMAT_MOD_LINEAR,
+		DRM_FORMAT_MOD_INVALID
+	};
 
 	drm_mode_copy(&mode_copy, mode);
 	ret = tinydrm_rotate_mode(&mode_copy, rotation);
@@ -202,6 +206,6 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
 		return PTR_ERR(connector);
 
 	return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
-					    format_count, NULL, connector);
+					    format_count, modifiers, connector);
 }
 EXPORT_SYMBOL(tinydrm_display_pipe_init);
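With allow_fb_modifiers turned on and the pipes advertising a DRM_FORMAT_MOD_INVALID-terminated list containing only LINEAR, userspace can now create framebuffers with an explicit modifier. A hedged userspace sketch using libdrm -- fd, bo_handle, pitch, width and height are assumed to come from an earlier dumb-buffer allocation:

	/* Sketch: create an FB with the explicit LINEAR modifier, the only
	 * modifier these pipes advertise. All variables are assumed from a
	 * prior dumb-buffer allocation.
	 */
	uint32_t handles[4] = { bo_handle }, pitches[4] = { pitch };
	uint32_t offsets[4] = { 0 };
	uint64_t modifiers[4] = { DRM_FORMAT_MOD_LINEAR };
	uint32_t fb_id;
	int ret;

	ret = drmModeAddFB2WithModifiers(fd, width, height, DRM_FORMAT_RGB565,
					 handles, pitches, offsets, modifiers,
					 &fb_id, DRM_MODE_FB_MODIFIERS);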
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
new file mode 100644
index 0000000..81a2bbe
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for the HX8357D LCD controller
+ *
+ * Copyright 2018 Broadcom
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ * Copyright 2016 Noralf Trønnes
+ * Copyright (C) 2015 Adafruit Industries
+ * Copyright (C) 2013 Christian Vogelgsang
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define HX8357D_SETOSC 0xb0
+#define HX8357D_SETPOWER 0xb1
+#define HX8357D_SETRGB 0xb3
+#define HX8357D_SETCYC 0xb4
+#define HX8357D_SETCOM 0xb6
+#define HX8357D_SETEXTC 0xb9
+#define HX8357D_SETSTBA 0xc0
+#define HX8357D_SETPANEL 0xcc
+#define HX8357D_SETGAMMA 0xe0
+
+#define HX8357D_MADCTL_MY  0x80
+#define HX8357D_MADCTL_MX  0x40
+#define HX8357D_MADCTL_MV  0x20
+#define HX8357D_MADCTL_ML  0x10
+#define HX8357D_MADCTL_RGB 0x00
+#define HX8357D_MADCTL_BGR 0x08
+#define HX8357D_MADCTL_MH  0x04
+
+static void yx350hv15_enable(struct drm_simple_display_pipe *pipe,
+			     struct drm_crtc_state *crtc_state,
+			     struct drm_plane_state *plane_state)
+{
+	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	u8 addr_mode;
+	int ret;
+
+	DRM_DEBUG_KMS("\n");
+
+	ret = mipi_dbi_poweron_conditional_reset(mipi);
+	if (ret < 0)
+		return;
+	if (ret == 1)
+		goto out_enable;
+
+	/* setextc */
+	mipi_dbi_command(mipi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
+	msleep(150);
+
+	/* setRGB which also enables SDO */
+	mipi_dbi_command(mipi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
+
+	/* -1.52V */
+	mipi_dbi_command(mipi, HX8357D_SETCOM, 0x25);
+
+	/* Normal mode 70 Hz, idle mode 55 Hz */
+	mipi_dbi_command(mipi, HX8357D_SETOSC, 0x68);
+
+	/* Set Panel - BGR, Gate direction swapped */
+	mipi_dbi_command(mipi, HX8357D_SETPANEL, 0x05);
+
+	mipi_dbi_command(mipi, HX8357D_SETPOWER,
+			 0x00,  /* Not deep standby */
+			 0x15,  /* BT */
+			 0x1C,  /* VSPR */
+			 0x1C,  /* VSNR */
+			 0x83,  /* AP */
+			 0xAA);  /* FS */
+
+	mipi_dbi_command(mipi, HX8357D_SETSTBA,
+			 0x50,  /* OPON normal */
+			 0x50,  /* OPON idle */
+			 0x01,  /* STBA */
+			 0x3C,  /* STBA */
+			 0x1E,  /* STBA */
+			 0x08);  /* GEN */
+
+	mipi_dbi_command(mipi, HX8357D_SETCYC,
+			 0x02,  /* NW 0x02 */
+			 0x40,  /* RTN */
+			 0x00,  /* DIV */
+			 0x2A,  /* DUM */
+			 0x2A,  /* DUM */
+			 0x0D,  /* GDON */
+			 0x78);  /* GDOFF */
+
+	mipi_dbi_command(mipi, HX8357D_SETGAMMA,
+			 0x02,
+			 0x0A,
+			 0x11,
+			 0x1d,
+			 0x23,
+			 0x35,
+			 0x41,
+			 0x4b,
+			 0x4b,
+			 0x42,
+			 0x3A,
+			 0x27,
+			 0x1B,
+			 0x08,
+			 0x09,
+			 0x03,
+			 0x02,
+			 0x0A,
+			 0x11,
+			 0x1d,
+			 0x23,
+			 0x35,
+			 0x41,
+			 0x4b,
+			 0x4b,
+			 0x42,
+			 0x3A,
+			 0x27,
+			 0x1B,
+			 0x08,
+			 0x09,
+			 0x03,
+			 0x00,
+			 0x01);
+
+	/* 16 bit */
+	mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+			 MIPI_DCS_PIXEL_FMT_16BIT);
+
+	/* TE off */
+	mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_ON, 0x00);
+
+	/* tear line */
+	mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
+
+	/* Exit Sleep */
+	mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+	msleep(150);
+
+	/* display on */
+	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+	usleep_range(5000, 7000);
+
+out_enable:
+	switch (mipi->rotation) {
+	default:
+		addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
+		break;
+	case 90:
+		addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MY;
+		break;
+	case 180:
+		addr_mode = 0;
+		break;
+	case 270:
+		addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
+		break;
+	}
+	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
+	.enable = yx350hv15_enable,
+	.disable = mipi_dbi_pipe_disable,
+	.update = tinydrm_display_pipe_update,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx350hv15_mode = {
+	TINYDRM_MODE(320, 480, 60, 75),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+
+static struct drm_driver hx8357d_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+	.fops			= &hx8357d_fops,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
+	.debugfs_init		= mipi_dbi_debugfs_init,
+	.name			= "hx8357d",
+	.desc			= "HX8357D",
+	.date			= "20181023",
+	.major			= 1,
+	.minor			= 0,
+};
+
+static const struct of_device_id hx8357d_of_match[] = {
+	{ .compatible = "adafruit,yx350hv15" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, hx8357d_of_match);
+
+static const struct spi_device_id hx8357d_id[] = {
+	{ "yx350hv15", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, hx8357d_id);
+
+static int hx8357d_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct mipi_dbi *mipi;
+	struct gpio_desc *dc;
+	u32 rotation = 0;
+	int ret;
+
+	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	if (!mipi)
+		return -ENOMEM;
+
+	dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+	if (IS_ERR(dc)) {
+		DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+		return PTR_ERR(dc);
+	}
+
+	mipi->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(mipi->backlight))
+		return PTR_ERR(mipi->backlight);
+
+	device_property_read_u32(dev, "rotation", &rotation);
+
+	ret = mipi_dbi_spi_init(spi, mipi, dc);
+	if (ret)
+		return ret;
+
+	ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
+			    &hx8357d_driver, &yx350hv15_mode, rotation);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, mipi);
+
+	return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+	tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver hx8357d_spi_driver = {
+	.driver = {
+		.name = "hx8357d",
+		.of_match_table = hx8357d_of_match,
+	},
+	.id_table = hx8357d_id,
+	.probe = hx8357d_probe,
+	.shutdown = hx8357d_shutdown,
+};
+module_spi_driver(hx8357d_spi_driver);
+
+MODULE_DESCRIPTION("HX8357D DRM driver");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe..78f7c2d 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,7 +20,8 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
@@ -367,7 +368,7 @@ static struct drm_driver ili9225_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
 	.fops			= &ili9225_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.name			= "ili9225",
 	.desc			= "Ilitek ILI9225",
 	.date			= "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 6701037..51395bd 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,7 @@
 #include <linux/property.h>
 #include <linux/spi/spi.h>
 
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
@@ -144,7 +144,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
 static struct drm_driver ili9341_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 	.fops			= &ili9341_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init		= mipi_dbi_debugfs_init,
 	.name			= "ili9341",
 	.desc			= "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d7bb4c5..3fa62e7 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,9 +17,9 @@
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 #include <video/mipi_display.h>
@@ -153,7 +153,7 @@ static struct drm_driver mi0283qt_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
 	.fops			= &mi0283qt_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init		= mipi_dbi_debugfs_init,
 	.name			= "mi0283qt",
 	.desc			= "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index cb3441e..3a05e56 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,15 +9,19 @@
  * (at your option) any later version.
  */
 
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
+
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
 #include <video/mipi_display.h>
 
 #define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -240,10 +244,10 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
 
 	mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
 			 (clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF,
-			 (clip.x2 >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
+			 ((clip.x2 - 1) >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
 	mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
 			 (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
-			 (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+			 ((clip.y2 - 1) >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
 
 	ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
 				(clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 50a1d42..54d6fe0 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,8 @@
 #include <linux/spi/spi.h>
 #include <linux/thermal.h>
 
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/tinydrm.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
@@ -106,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header,
 
 	/* Stack allocated tx? */
 	if (tx && len <= 32) {
-		txbuf = kmalloc(len, GFP_KERNEL);
+		txbuf = kmemdup(tx, len, GFP_KERNEL);
 		if (!txbuf) {
 			ret = -ENOMEM;
 			goto out_free;
 		}
-		memcpy(txbuf, tx, len);
 	}
 
 	if (rx) {
@@ -882,7 +883,7 @@ static struct drm_driver repaper_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
 	.fops			= &repaper_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.name			= "repaper",
 	.desc			= "Pervasive Displays RePaper e-ink panels",
 	.date			= "20170405",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 2fcbc30..a6a8a10 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,7 +17,8 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
@@ -303,7 +304,7 @@ static struct drm_driver st7586_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
 	.fops			= &st7586_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init		= mipi_dbi_debugfs_init,
 	.name			= "st7586",
 	.desc			= "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3081bc5..b39779e 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,7 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
@@ -119,7 +119,7 @@ static struct drm_driver st7735r_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
 	.fops			= &st7735r_fops,
-	TINYDRM_GEM_DRIVER_OPS,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init		= mipi_dbi_debugfs_init,
 	.name			= "st7735r",
 	.desc			= "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 26b889f..d87935b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,14 @@
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+struct ttm_bo_global ttm_bo_glob = {
+	.use_count = 0
+};
+
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
 	.mode = S_IRUGO
@@ -872,7 +880,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	if (fence) {
 		reservation_object_add_shared_fence(bo->resv, fence);
 
-		ret = reservation_object_reserve_shared(bo->resv);
+		ret = reservation_object_reserve_shared(bo->resv, 1);
 		if (unlikely(ret))
 			return ret;
 
@@ -977,7 +985,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool has_erestartsys = false;
 	int i, ret;
 
-	ret = reservation_object_reserve_shared(bo->resv);
+	ret = reservation_object_reserve_shared(bo->resv, 1);
 	if (unlikely(ret))
 		return ret;
 
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
 		container_of(kobj, struct ttm_bo_global, kobj);
 
 	__free_page(glob->dummy_read_page);
-	kfree(glob);
 }
 
-void ttm_bo_global_release(struct drm_global_reference *ref)
+static void ttm_bo_global_release(void)
 {
-	struct ttm_bo_global *glob = ref->object;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+
+	mutex_lock(&ttm_global_mutex);
+	if (--glob->use_count > 0)
+		goto out;
 
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
+	ttm_mem_global_release(&ttm_mem_glob);
+out:
+	mutex_unlock(&ttm_global_mutex);
 }
-EXPORT_SYMBOL(ttm_bo_global_release);
 
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static int ttm_bo_global_init(void)
 {
-	struct ttm_bo_global_ref *bo_ref =
-		container_of(ref, struct ttm_bo_global_ref, ref);
-	struct ttm_bo_global *glob = ref->object;
-	int ret;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+	int ret = 0;
 	unsigned i;
 
-	mutex_init(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
+	if (++glob->use_count > 1)
+		goto out;
+
+	ret = ttm_mem_global_init(&ttm_mem_glob);
+	if (ret)
+		goto out;
+
 	spin_lock_init(&glob->lru_lock);
-	glob->mem_glob = bo_ref->mem_glob;
+	glob->mem_glob = &ttm_mem_glob;
 	glob->mem_glob->bo_glob = glob;
 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
 	if (unlikely(glob->dummy_read_page == NULL)) {
 		ret = -ENOMEM;
-		goto out_no_drp;
+		goto out;
 	}
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
-	return ret;
-out_no_drp:
-	kfree(glob);
+out:
+	mutex_unlock(&ttm_global_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(ttm_bo_global_init);
-
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		}
 	}
 
-	mutex_lock(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
 	list_del(&bdev->device_list);
-	mutex_unlock(&glob->device_list_mutex);
+	mutex_unlock(&ttm_global_mutex);
 
 	cancel_delayed_work_sync(&bdev->wq);
 
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
+	if (!ret)
+		ttm_bo_global_release();
+
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_bo_global *glob,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       uint64_t file_page_offset,
 		       bool need_dma32)
 {
-	int ret = -EINVAL;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+	int ret;
+
+	ret = ttm_bo_global_init();
+	if (ret)
+		return ret;
 
 	bdev->driver = driver;
 
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-	mutex_lock(&glob->device_list_mutex);
+	mutex_lock(&ttm_global_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
-	mutex_unlock(&glob->device_list_mutex);
+	mutex_unlock(&ttm_global_mutex);
 
 	return 0;
 out_no_sys:
+	ttm_bo_global_release();
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
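The conversion above collapses the old per-driver drm_global references into one mutex-protected use count: the first ttm_bo_device_init() call initializes the shared BO and memory globals, and the last ttm_bo_device_release() tears them down. A minimal sketch of the same idiom with hypothetical names, detached from the TTM specifics:

	static DEFINE_MUTEX(singleton_mutex);
	static unsigned int singleton_use_count;

	static int singleton_get(void)
	{
		int ret = 0;

		mutex_lock(&singleton_mutex);
		if (++singleton_use_count == 1) {
			ret = global_init();	/* hypothetical init hook */
			if (ret)
				singleton_use_count--;
		}
		mutex_unlock(&singleton_mutex);
		return ret;
	}

	static void singleton_put(void)
	{
		mutex_lock(&singleton_mutex);
		if (--singleton_use_count == 0)
			global_fini();		/* hypothetical fini hook */
		mutex_unlock(&singleton_mutex);
	}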
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e73ae0d..9386034 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		}
 
 		if (!ret) {
-			if (!entry->shared)
+			if (!entry->num_shared)
 				continue;
 
-			ret = reservation_object_reserve_shared(bo->resv);
+			ret = reservation_object_reserve_shared(bo->resv,
+								entry->num_shared);
 			if (!ret)
 				continue;
 		}
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			}
 		}
 
-		if (!ret && entry->shared)
-			ret = reservation_object_reserve_shared(bo->resv);
+		if (!ret && entry->num_shared)
+			ret = reservation_object_reserve_shared(bo->resv,
+								entry->num_shared);
 
 		if (unlikely(ret != 0)) {
 			if (ret == -EINTR)
@@ -187,21 +189,19 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	struct ttm_buffer_object *bo;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
-	struct ttm_bo_driver *driver;
 
 	if (list_empty(list))
 		return;
 
 	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
 	bdev = bo->bdev;
-	driver = bdev->driver;
 	glob = bo->bdev->glob;
 
 	spin_lock(&glob->lru_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		if (entry->shared)
+		if (entry->num_shared)
 			reservation_object_add_shared_fence(bo->resv, fence);
 		else
 			reservation_object_add_excl_fence(bo->resv, fence);
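Alongside the rename from entry->shared to entry->num_shared, reservation_object_reserve_shared() now takes the number of fence slots to reserve, so a caller that will attach several shared fences can reserve them in one pass instead of one call per fence. A caller-side sketch (hypothetical driver code, not from this patch):

	/* Sketch: reserve room for all shared fences up front, then add
	 * them while the reservation lock is still held.
	 */
	ret = reservation_object_reserve_shared(bo->resv, num_fences);
	if (ret)
		return ret;

	for (i = 0; i < num_fences; i++)
		reservation_object_add_shared_fence(bo->resv, fences[i]);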
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 450387c..f1567c3 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -41,6 +41,9 @@
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
+struct ttm_mem_global ttm_mem_glob;
+EXPORT_SYMBOL(ttm_mem_glob);
+
 struct ttm_mem_zone {
 	struct kobject kobj;
 	struct ttm_mem_global *glob;
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
 	return size;
 }
 
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
-	struct ttm_mem_global *glob =
-		container_of(kobj, struct ttm_mem_global, kobj);
-
-	kfree(glob);
-}
-
 static struct attribute *ttm_mem_global_attrs[] = {
 	&ttm_mem_global_lower_mem_limit,
 	NULL
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
 };
 
 static struct kobj_type ttm_mem_glob_kobj_type = {
-	.release = &ttm_mem_global_kobj_release,
 	.sysfs_ops = &ttm_mem_global_ops,
 	.default_attrs = ttm_mem_global_attrs,
 };
@@ -464,7 +458,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 	ttm_mem_global_release(glob);
 	return ret;
 }
-EXPORT_SYMBOL(ttm_mem_global_init);
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
-EXPORT_SYMBOL(ttm_mem_global_release);
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 72efcec..28e2d03 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -249,7 +249,7 @@ static int tve200_probe(struct platform_device *pdev)
 clk_disable:
 	clk_disable_unprepare(priv->pclk);
 dev_unref:
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	return ret;
 }
 
@@ -263,7 +263,7 @@ static int tve200_remove(struct platform_device *pdev)
 		drm_panel_bridge_remove(priv->bridge);
 	drm_mode_config_cleanup(drm);
 	clk_disable_unprepare(priv->pclk);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f09..1b014d9 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err;
 
-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		goto err_fb;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
-err_fb:
-	udl_fbdev_cleanup(dev);
+
 err:
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d9651..a08766d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 	bo->resv = attach->dmabuf->resv;
 
 	bo->sgt = sgt;
+	obj->import_attach = attach;
 	v3d_bo_get_pages(bo);
 
 	v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c5..eb2b2d2 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
 			   V3D_READ(v3d_hub_reg_defs[i].reg));
 	}
 
-	for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
-		seq_printf(m, "%s (0x%04x): 0x%08x\n",
-			   v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
-			   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+	if (v3d->ver < 41) {
+		for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+			seq_printf(m, "%s (0x%04x): 0x%08x\n",
+				   v3d_gca_reg_defs[i].name,
+				   v3d_gca_reg_defs[i].reg,
+				   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+		}
 	}
 
 	for (core = 0; core < v3d->cores; core++) {
@@ -176,9 +179,44 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int v3d_measure_clock(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	uint32_t cycles;
+	int core = 0;
+	int measure_ms = 1000;
+
+	if (v3d->ver >= 40) {
+		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
+			       V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
+					     V3D_PCTR_S0));
+		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
+		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
+	} else {
+		V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
+			       V3D_PCTR_CYCLE_COUNT);
+		V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
+		V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
+			       V3D_V3_PCTR_0_EN_ENABLE |
+			       1);
+	}
+	msleep(measure_ms);
+	  * YX350HV15-T 3.5" 320x480 TFT (Adafruit 3.5")
+
+	seq_printf(m, "cycles: %d (%d.%d MHz)\n",
+		   cycles,
+		   cycles / (measure_ms * 1000),
+		   (cycles / (measure_ms * 100)) % 10);
+
+	return 0;
+}
+
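The unit conversion in that printout is worth spelling out: counting for measure_ms = 1000 ms makes cycles / (measure_ms * 1000) cycles per microsecond, i.e. whole MHz, while cycles / (measure_ms * 100) % 10 recovers the tenths digit. For example, a core running at 499.95 MHz accumulates about 499,950,000 cycles over the second and is reported as "cycles: 499950000 (499.9 MHz)".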
 static const struct drm_info_list v3d_debugfs_list[] = {
 	{"v3d_ident", v3d_v3d_debugfs_ident, 0},
 	{"v3d_regs", v3d_v3d_debugfs_regs, 0},
+	{"measure_clock", v3d_measure_clock, 0},
 	{"bo_stats", v3d_debugfs_bo_stats, 0},
 };
 
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68f..f0afcec 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	}
 
-	/* Any params that aren't just register reads would go here. */
 
-	DRM_DEBUG("Unknown parameter %d\n", args->param);
-	return -EINVAL;
+	switch (args->param) {
+	case DRM_V3D_PARAM_SUPPORTS_TFU:
+		args->value = 1;
+		return 0;
+	default:
+		DRM_DEBUG("Unknown parameter %d\n", args->param);
+		return -EINVAL;
+	}
 }
 
 static int
@@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = {
 /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
  * protection between clients.  Note that render nodes would be
  * able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node.  The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
  */
 static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };
 
 static const struct vm_operations_struct v3d_vm_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index e6fed69..dcb772a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -7,19 +7,18 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem.h>
 #include <drm/gpu_scheduler.h>
+#include "uapi/drm/v3d_drm.h"
 
 #define GMP_GRANULARITY (128 * 1024)
 
-/* Enum for each of the V3D queues.  We maintain various queue
- * tracking as an array because at some point we'll want to support
- * the TFU (texture formatting unit) as another queue.
- */
+/* Enum for each of the V3D queues. */
 enum v3d_queue {
 	V3D_BIN,
 	V3D_RENDER,
+	V3D_TFU,
 };
 
-#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+#define V3D_MAX_QUEUES (V3D_TFU + 1)
 
 struct v3d_queue_state {
 	struct drm_gpu_scheduler sched;
@@ -68,6 +67,7 @@ struct v3d_dev {
 
 	struct v3d_exec_info *bin_job;
 	struct v3d_exec_info *render_job;
+	struct v3d_tfu_job *tfu_job;
 
 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
 
@@ -198,6 +198,11 @@ struct v3d_exec_info {
 	 */
 	struct dma_fence *bin_done_fence;
 
+	/* Fence for when the scheduler considers the render to be
+	 * done, for when the BOs' reservations should be complete.
+	 */
+	struct dma_fence *render_done_fence;
+
 	struct kref refcount;
 
 	/* This is the array of BOs that were looked up at the start of exec. */
@@ -213,6 +218,25 @@ struct v3d_exec_info {
 	u32 qma, qms, qts;
 };
 
+struct v3d_tfu_job {
+	struct drm_sched_job base;
+
+	struct drm_v3d_submit_tfu args;
+
+	/* An optional fence userspace can pass in for the job to depend on. */
+	struct dma_fence *in_fence;
+
+	/* v3d fence to be signaled by IRQ handler when the job is complete. */
+	struct dma_fence *done_fence;
+
+	struct v3d_dev *v3d;
+
+	struct kref refcount;
+
+	/* This is the array of BOs that were looked up at the start of exec. */
+	struct v3d_bo *bo[4];
+};
+
 /**
  * _wait_for - magic (register) wait macro
  *
@@ -276,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev);
 void v3d_gem_destroy(struct drm_device *dev);
 int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
 void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_tfu_job_put(struct v3d_tfu_job *exec);
 void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
 void v3d_flush_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 50bfcf9..b0a2a1a 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct v3d_fence *f = to_v3d_fence(fence);
 
-	if (f->queue == V3D_BIN)
+	switch (f->queue) {
+	case V3D_BIN:
 		return "v3d-bin";
-	else
+	case V3D_RENDER:
 		return "v3d-render";
+	case V3D_TFU:
+		return "v3d-tfu";
+	default:
+		return NULL;
+	}
 }
 
 const struct dma_fence_ops v3d_fence_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 70c5477..05ca631 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -207,32 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d)
 }
 
 static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
+v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+			 struct dma_fence *fence)
 {
-	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
-	struct v3d_bo *bo;
 	int i;
 
-	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
+	for (i = 0; i < bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(bo->resv, out_fence);
+		reservation_object_add_excl_fence(bos[i]->resv, fence);
 	}
 }
 
 static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
-			   struct v3d_exec_info *exec,
+v3d_unlock_bo_reservations(struct v3d_bo **bos,
+			   int bo_count,
 			   struct ww_acquire_ctx *acquire_ctx)
 {
 	int i;
 
-	for (i = 0; i < exec->bo_count; i++) {
-		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ww_mutex_unlock(&bo->resv->lock);
-	}
+	for (i = 0; i < bo_count; i++)
+		ww_mutex_unlock(&bos[i]->resv->lock);
 
 	ww_acquire_fini(acquire_ctx);
 }
@@ -245,19 +239,19 @@ v3d_unlock_bo_reservations(struct drm_device *dev,
  * to v3d, so we don't attach dma-buf fences to them.
  */
 static int
-v3d_lock_bo_reservations(struct drm_device *dev,
-			 struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_bo **bos,
+			 int bo_count,
 			 struct ww_acquire_ctx *acquire_ctx)
 {
 	int contended_lock = -1;
 	int i, ret;
-	struct v3d_bo *bo;
 
 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
 
 retry:
 	if (contended_lock != -1) {
-		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+		struct v3d_bo *bo = bos[contended_lock];
+
 		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
 						       acquire_ctx);
 		if (ret) {
@@ -266,23 +260,20 @@ v3d_lock_bo_reservations(struct drm_device *dev,
 		}
 	}
 
-	for (i = 0; i < exec->bo_count; i++) {
+	for (i = 0; i < bo_count; i++) {
 		if (i == contended_lock)
 			continue;
 
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+		ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
+						  acquire_ctx);
 		if (ret) {
 			int j;
 
-			for (j = 0; j < i; j++) {
-				bo = to_v3d_bo(&exec->bo[j]->base);
-				ww_mutex_unlock(&bo->resv->lock);
-			}
+			for (j = 0; j < i; j++)
+				ww_mutex_unlock(&bos[j]->resv->lock);
 
 			if (contended_lock != -1 && contended_lock >= i) {
-				bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+				struct v3d_bo *bo = bos[contended_lock];
 
 				ww_mutex_unlock(&bo->resv->lock);
 			}
@@ -302,12 +293,11 @@ v3d_lock_bo_reservations(struct drm_device *dev,
 	/* Reserve space for our shared (read-only) fence references,
 	 * before we commit the CL to the hardware.
 	 */
-	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ret = reservation_object_reserve_shared(bo->resv);
+	for (i = 0; i < bo_count; i++) {
+		ret = reservation_object_reserve_shared(bos[i]->resv, 1);
 		if (ret) {
-			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+			v3d_unlock_bo_reservations(bos, bo_count,
+						   acquire_ctx);
 			return ret;
 		}
 	}
@@ -409,6 +399,7 @@ v3d_exec_cleanup(struct kref *ref)
 	dma_fence_put(exec->render.done_fence);
 
 	dma_fence_put(exec->bin_done_fence);
+	dma_fence_put(exec->render_done_fence);
 
 	for (i = 0; i < exec->bo_count; i++)
 		drm_gem_object_put_unlocked(&exec->bo[i]->base);
@@ -429,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec)
 	kref_put(&exec->refcount, v3d_exec_cleanup);
 }
 
+static void
+v3d_tfu_job_cleanup(struct kref *ref)
+{
+	struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
+					       refcount);
+	struct v3d_dev *v3d = job->v3d;
+	unsigned int i;
+
+	dma_fence_put(job->in_fence);
+	dma_fence_put(job->done_fence);
+
+	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
+		if (job->bo[i])
+			drm_gem_object_put_unlocked(&job->bo[i]->base);
+	}
+
+	pm_runtime_mark_last_busy(v3d->dev);
+	pm_runtime_put_autosuspend(v3d->dev);
+
+	kfree(job);
+}
+
+void v3d_tfu_job_put(struct v3d_tfu_job *job)
+{
+	kref_put(&job->refcount, v3d_tfu_job_cleanup);
+}
+
 int
 v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
@@ -503,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct drm_syncobj *sync_out;
 	int ret = 0;
 
+	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
 	if (args->pad != 0) {
 		DRM_INFO("pad must be zero: %d\n", args->pad);
 		return -EINVAL;
@@ -521,12 +541,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	kref_init(&exec->refcount);
 
 	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
-				     0, &exec->bin.in_fence);
+				     0, 0, &exec->bin.in_fence);
 	if (ret == -EINVAL)
 		goto fail;
 
 	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
-				     0, &exec->render.in_fence);
+				     0, 0, &exec->render.in_fence);
 	if (ret == -EINVAL)
 		goto fail;
 
@@ -546,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
-	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+	ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
+				       &acquire_ctx);
 	if (ret)
 		goto fail;
 
@@ -572,20 +593,23 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail_unreserve;
 
+	exec->render_done_fence =
+		dma_fence_get(&exec->render.base.s_fence->finished);
+
 	kref_get(&exec->refcount); /* put by scheduler job completion */
 	drm_sched_entity_push_job(&exec->render.base,
 				  &v3d_priv->sched_entity[V3D_RENDER]);
 	mutex_unlock(&v3d->sched_lock);
 
-	v3d_attach_object_fences(exec);
+	v3d_attach_object_fences(exec->bo, exec->bo_count,
+				 exec->render_done_fence);
 
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 
 	/* Update the return sync object for the job. */
 	sync_out = drm_syncobj_find(file_priv, args->out_sync);
 	if (sync_out) {
-		drm_syncobj_replace_fence(sync_out, 0,
-					  &exec->render.base.s_fence->finished);
+		drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
 		drm_syncobj_put(sync_out);
 	}
 
@@ -595,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 fail_unreserve:
 	mutex_unlock(&v3d->sched_lock);
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 fail:
 	v3d_exec_put(exec);
 
 	return ret;
 }
 
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_tfu *args = data;
+	struct v3d_tfu_job *job;
+	struct ww_acquire_ctx acquire_ctx;
+	struct drm_syncobj *sync_out;
+	struct dma_fence *sched_done_fence;
+	int ret = 0;
+	int bo_count;
+
+	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	ret = pm_runtime_get_sync(v3d->dev);
+	if (ret < 0) {
+		kfree(job);
+		return ret;
+	}
+
+	kref_init(&job->refcount);
+
+	ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+				     0, 0, &job->in_fence);
+	if (ret == -EINVAL)
+		goto fail;
+
+	job->args = *args;
+	job->v3d = v3d;
+
+	spin_lock(&file_priv->table_lock);
+	for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+		struct drm_gem_object *bo;
+
+		if (!args->bo_handles[bo_count])
+			break;
+
+		bo = idr_find(&file_priv->object_idr,
+			      args->bo_handles[bo_count]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  bo_count, args->bo_handles[bo_count]);
+			ret = -ENOENT;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_get(bo);
+		job->bo[bo_count] = to_v3d_bo(bo);
+	}
+	spin_unlock(&file_priv->table_lock);
+
+	ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	ret = drm_sched_job_init(&job->base,
+				 &v3d_priv->sched_entity[V3D_TFU],
+				 v3d_priv);
+	if (ret)
+		goto fail_unreserve;
+
+	sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+	kref_get(&job->refcount); /* put by scheduler job completion */
+	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+
+	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+
+	/* Update the return sync object */
+	sync_out = drm_syncobj_find(file_priv, args->out_sync);
+	if (sync_out) {
+		drm_syncobj_replace_fence(sync_out, sched_done_fence);
+		drm_syncobj_put(sync_out);
+	}
+	dma_fence_put(sched_done_fence);
+
+	v3d_tfu_job_put(job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+fail:
+	v3d_tfu_job_put(job);
+
+	return ret;
+}
+
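For context, submission from userspace is a single ioctl carrying the TFU register values. A rough, hedged sketch -- field names follow this series' uapi, the register bit is borrowed from v3d_regs.h purely for illustration, and the handles and addresses are invented; consult include/uapi/drm/v3d_drm.h for the authoritative layout:

	/* Hypothetical userspace sketch; not a complete conversion setup
	 * (stride, output size and the format bits in icfg are omitted).
	 */
	struct drm_v3d_submit_tfu tfu = {
		.icfg = V3D_TFU_ICFG_IOC,	/* interrupt on completion */
		.iia = src_offset,		/* input image address */
		.ioa = dst_offset,		/* output image address */
		.bo_handles = { dst_handle, src_handle },
		.out_sync = out_syncobj,	/* signaled when the job is done */
	};
	int ret = drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &tfu);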
 int
 v3d_gem_init(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e07514e..69338da 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -4,8 +4,8 @@
 /**
  * DOC: Interrupt management for the V3D engine
  *
- * When we take a binning or rendering flush done interrupt, we need
- * to signal the fence for that job so that the scheduler can queue up
+ * When we take a bin, render, or TFU done interrupt, we need to
+ * signal the fence for that job so that the scheduler can queue up
  * the next one and unblock any waiters.
  *
  * When we take the binner out of memory interrupt, we need to
@@ -15,6 +15,7 @@
 
 #include "v3d_drv.h"
 #include "v3d_regs.h"
+#include "v3d_trace.h"
 
 #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
 			     V3D_INT_FLDONE |	\
@@ -23,7 +24,8 @@
 
 #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
 			    V3D_HUB_INT_MMU_PTI |	\
-			    V3D_HUB_INT_MMU_CAP))
+			    V3D_HUB_INT_MMU_CAP |	\
+			    V3D_HUB_INT_TFUC))
 
 static void
 v3d_overflow_mem_work(struct work_struct *work)
@@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg)
 	}
 
 	if (intsts & V3D_INT_FLDONE) {
-		dma_fence_signal(v3d->bin_job->bin.done_fence);
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->bin_job->bin.done_fence);
+
+		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
 		status = IRQ_HANDLED;
 	}
 
 	if (intsts & V3D_INT_FRDONE) {
-		dma_fence_signal(v3d->render_job->render.done_fence);
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->render_job->render.done_fence);
+
+		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
 		status = IRQ_HANDLED;
 	}
 
@@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg)
 	/* Acknowledge the interrupts we're handling here. */
 	V3D_WRITE(V3D_HUB_INT_CLR, intsts);
 
+	if (intsts & V3D_HUB_INT_TFUC) {
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->tfu_job->done_fence);
+
+		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
+		status = IRQ_HANDLED;
+	}
+
 	if (intsts & (V3D_HUB_INT_MMU_WRV |
 		      V3D_HUB_INT_MMU_PTI |
 		      V3D_HUB_INT_MMU_CAP)) {
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 8540465..6ccdee9 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -86,6 +86,55 @@
 # define V3D_TOP_GR_BRIDGE_SW_INIT_1                   0x0000c
 # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
 
+#define V3D_TFU_CS                                     0x00400
+/* Stops current job, empties input fifo. */
+# define V3D_TFU_CS_TFURST                             BIT(31)
+# define V3D_TFU_CS_CVTCT_MASK                         V3D_MASK(23, 16)
+# define V3D_TFU_CS_CVTCT_SHIFT                        16
+# define V3D_TFU_CS_NFREE_MASK                         V3D_MASK(13, 8)
+# define V3D_TFU_CS_NFREE_SHIFT                        8
+# define V3D_TFU_CS_BUSY                               BIT(0)
+
+#define V3D_TFU_SU                                     0x00404
+/* Interrupt when FINTTHR input slots are free (0 = disabled) */
+# define V3D_TFU_SU_FINTTHR_MASK                       V3D_MASK(13, 8)
+# define V3D_TFU_SU_FINTTHR_SHIFT                      8
+/* Skips resetting the CRC at the start of CRC generation. */
+# define V3D_TFU_SU_CRCCHAIN                           BIT(4)
+/* Skips writes, computes CRC of the image; miplevels must be 0. */
+# define V3D_TFU_SU_CRC                                BIT(3)
+# define V3D_TFU_SU_THROTTLE_MASK                      V3D_MASK(1, 0)
+# define V3D_TFU_SU_THROTTLE_SHIFT                     0
+
+#define V3D_TFU_ICFG                                   0x00408
+/* Interrupt when the conversion is complete. */
+# define V3D_TFU_ICFG_IOC                              BIT(0)
+
+/* Input Image Address */
+#define V3D_TFU_IIA                                    0x0040c
+/* Input Chroma Address */
+#define V3D_TFU_ICA                                    0x00410
+/* Input Image Stride */
+#define V3D_TFU_IIS                                    0x00414
+/* Input Image U-Plane Address */
+#define V3D_TFU_IUA                                    0x00418
+/* Output Image Address */
+#define V3D_TFU_IOA                                    0x0041c
+/* Image Output Size */
+#define V3D_TFU_IOS                                    0x00420
+/* TFU YUV Coefficient 0 */
+#define V3D_TFU_COEF0                                  0x00424
+/* Use these regs instead of the defaults. */
+# define V3D_TFU_COEF0_USECOEF                         BIT(31)
+/* TFU YUV Coefficient 1 */
+#define V3D_TFU_COEF1                                  0x00428
+/* TFU YUV Coefficient 2 */
+#define V3D_TFU_COEF2                                  0x0042c
+/* TFU YUV Coefficient 3 */
+#define V3D_TFU_COEF3                                  0x00430
+
+#define V3D_TFU_CRC                                    0x00434
+
 /* Per-MMU registers. */
 
 #define V3D_MMUC_CONTROL                               0x01000
@@ -267,6 +316,36 @@
 # define V3D_PTB_BXCF_RWORDERDISA                      BIT(1)
 # define V3D_PTB_BXCF_CLIPDISA                         BIT(0)
 
+#define V3D_V3_PCTR_0_EN                               0x00674
+#define V3D_V3_PCTR_0_EN_ENABLE                        BIT(31)
+#define V3D_V4_PCTR_0_EN                               0x00650
+/* When a bit is set, resets the counter to 0. */
+#define V3D_V3_PCTR_0_CLR                              0x00670
+#define V3D_V4_PCTR_0_CLR                              0x00654
+#define V3D_PCTR_0_OVERFLOW                            0x00658
+
+#define V3D_V3_PCTR_0_PCTRS0                           0x00684
+#define V3D_V3_PCTR_0_PCTRS15                          0x00660
+#define V3D_V3_PCTR_0_PCTRSX(x)                        (V3D_V3_PCTR_0_PCTRS0 + \
+							4 * (x))
+/* Each src reg muxes four counters each. */
+#define V3D_V4_PCTR_0_SRC_0_3                          0x00660
+#define V3D_V4_PCTR_0_SRC_28_31                        0x0067c
+# define V3D_PCTR_S0_MASK                              V3D_MASK(6, 0)
+# define V3D_PCTR_S0_SHIFT                             0
+# define V3D_PCTR_S1_MASK                              V3D_MASK(14, 8)
+# define V3D_PCTR_S1_SHIFT                             8
+# define V3D_PCTR_S2_MASK                              V3D_MASK(22, 16)
+# define V3D_PCTR_S2_SHIFT                             16
+# define V3D_PCTR_S3_MASK                              V3D_MASK(30, 24)
+# define V3D_PCTR_S3_SHIFT                             24
+# define V3D_PCTR_CYCLE_COUNT                          32
+
+/* Output values of the counters. */
+#define V3D_PCTR_0_PCTR0                               0x00680
+#define V3D_PCTR_0_PCTR31                              0x006fc
+#define V3D_PCTR_0_PCTRX(x)                            (V3D_PCTR_0_PCTR0 + \
+							4 * (x))
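
To make the V4 layout above concrete: each 32-bit SRC register packs four
7-bit source fields (S0..S3 at shifts 0, 8, 16 and 24), so counter x's field
lives in SRC_0_3 + 4 * (x / 4) at shift 8 * (x % 4). A standalone sketch
(the helper name is invented for this note):

#include <stdio.h>

#define V3D_V4_PCTR_0_SRC_0_3 0x00660

/* Hypothetical helper, not part of the patch: locate counter x's
 * 7-bit source field given the SRC_0_3..SRC_28_31 layout above.
 */
static void pctr_src_field(unsigned int x, unsigned int *reg,
			   unsigned int *shift)
{
	*reg = V3D_V4_PCTR_0_SRC_0_3 + 4 * (x / 4); /* four counters per reg */
	*shift = 8 * (x % 4);                       /* S0/S1/S2/S3 fields */
}

int main(void)
{
	unsigned int reg, shift;

	pctr_src_field(13, &reg, &shift);
	printf("counter 13: reg 0x%05x, shift %u\n", reg, shift); /* 0x0066c, 8 */
	return 0;
}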
 #define V3D_GMP_STATUS                                 0x00800
 # define V3D_GMP_STATUS_GMPRST                         BIT(31)
 # define V3D_GMP_STATUS_WR_COUNT_MASK                  V3D_MASK(30, 24)
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9243dea..f7508e9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,16 +30,34 @@ to_v3d_job(struct drm_sched_job *sched_job)
 	return container_of(sched_job, struct v3d_job, base);
 }
 
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_tfu_job, base);
+}
+
 static void
 v3d_job_free(struct drm_sched_job *sched_job)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	v3d_exec_put(job->exec);
 }
 
+static void
+v3d_tfu_job_free(struct drm_sched_job *sched_job)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+	drm_sched_job_cleanup(sched_job);
+
+	v3d_tfu_job_put(job);
+}
+
 /**
- * Returns the fences that the bin job depends on, one by one.
+ * Returns the fences that the bin or render job depends on, one by one.
  * v3d_job_run() won't be called until all of them have been signaled.
  */
 static struct dma_fence *
@@ -76,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
 	return fence;
 }
 
+/**
+ * Returns the fences that the TFU job depends on, one by one.
+ * v3d_tfu_job_run() won't be called until all of them have been
+ * signaled.
+ */
+static struct dma_fence *
+v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
+		       struct drm_sched_entity *s_entity)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct dma_fence *fence;
+
+	fence = job->in_fence;
+	if (fence) {
+		job->in_fence = NULL;
+		return fence;
+	}
+
+	return NULL;
+}
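
For context on the callback contract (userspace simulation below, not kernel
code): the scheduler core keeps calling .dependency until it returns NULL,
waiting on each fence it gets back, and only then invokes .run_job; that is
why the TFU variant hands out its single in_fence exactly once.

#include <stdio.h>

struct fake_fence { const char *name; };

static struct fake_fence tfu_in_fence = { "tfu-in" };
static struct fake_fence *pending = &tfu_in_fence;

/* Mirrors v3d_tfu_job_dependency(): hand the in-fence out once. */
static struct fake_fence *tfu_dependency(void)
{
	struct fake_fence *f = pending;

	pending = NULL;
	return f;
}

int main(void)
{
	struct fake_fence *f;

	/* Scheduler core: drain dependencies, then run the job. */
	while ((f = tfu_dependency()) != NULL)
		printf("waiting on %s\n", f->name);
	printf("dependencies signaled, calling run_job()\n");
	return 0;
}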
+
 static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
@@ -147,31 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
 	return fence;
 }
 
-static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+static struct dma_fence *
+v3d_tfu_job_run(struct drm_sched_job *sched_job)
 {
-	struct v3d_job *job = to_v3d_job(sched_job);
-	struct v3d_exec_info *exec = job->exec;
-	struct v3d_dev *v3d = exec->v3d;
-	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
-	enum v3d_queue q;
-	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
-	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct v3d_dev *v3d = job->v3d;
+	struct drm_device *dev = &v3d->drm;
+	struct dma_fence *fence;
 
-	/* If the current address or return address have changed, then
-	 * the GPU has probably made progress and we should delay the
-	 * reset.  This could fail if the GPU got in an infinite loop
-	 * in the CL, but that is pretty unlikely outside of an i-g-t
-	 * testcase.
-	 */
-	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
-		job->timedout_ctca = ctca;
-		job->timedout_ctra = ctra;
+	fence = v3d_fence_create(v3d, V3D_TFU);
+	if (IS_ERR(fence))
+		return NULL;
 
-		schedule_delayed_work(&job->base.sched->work_tdr,
-				      job->base.sched->timeout);
-		return;
+	v3d->tfu_job = job;
+	if (job->done_fence)
+		dma_fence_put(job->done_fence);
+	job->done_fence = dma_fence_get(fence);
+
+	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
+
+	V3D_WRITE(V3D_TFU_IIA, job->args.iia);
+	V3D_WRITE(V3D_TFU_IIS, job->args.iis);
+	V3D_WRITE(V3D_TFU_ICA, job->args.ica);
+	V3D_WRITE(V3D_TFU_IUA, job->args.iua);
+	V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
+	V3D_WRITE(V3D_TFU_IOS, job->args.ios);
+	V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
+	if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
+		V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
+		V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
+		V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
 	}
+	/* ICFG kicks off the job. */
+	V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+
+	return fence;
+}
+
+static void
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+{
+	enum v3d_queue q;
 
 	mutex_lock(&v3d->reset_lock);
 
@@ -196,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 	mutex_unlock(&v3d->reset_lock);
 }
 
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_exec_info *exec = job->exec;
+	struct v3d_dev *v3d = exec->v3d;
+	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+	/* If the current address or return address have changed, then
+	 * the GPU has probably made progress and we should delay the
+	 * reset.  This could fail if the GPU got in an infinite loop
+	 * in the CL, but that is pretty unlikely outside of an i-g-t
+	 * testcase.
+	 */
+	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+		job->timedout_ctca = ctca;
+		job->timedout_ctra = ctra;
+		return;
+	}
+
+	v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static void
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+	v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+}
+
 static const struct drm_sched_backend_ops v3d_sched_ops = {
 	.dependency = v3d_job_dependency,
 	.run_job = v3d_job_run,
@@ -203,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = {
 	.free_job = v3d_job_free
 };
 
+static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
+	.dependency = v3d_tfu_job_dependency,
+	.run_job = v3d_tfu_job_run,
+	.timedout_job = v3d_tfu_job_timedout,
+	.free_job = v3d_tfu_job_free
+};
+
 int
 v3d_sched_init(struct v3d_dev *v3d)
 {
@@ -233,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d)
 		return ret;
 	}
 
+	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
+			     &v3d_tfu_sched_ops,
+			     hw_jobs_limit, job_hang_limit,
+			     msecs_to_jiffies(hang_limit_ms),
+			     "v3d_tfu");
+	if (ret) {
+		dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+			ret);
+		drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
+		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 85dd351..edd984a 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -12,6 +12,28 @@
 #define TRACE_SYSTEM v3d
 #define TRACE_INCLUDE_FILE v3d_trace
 
+TRACE_EVENT(v3d_submit_cl_ioctl,
+	    TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea),
+	    TP_ARGS(dev, ct1qba, ct1qea),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ct1qba)
+			     __field(u32, ct1qea)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->ct1qba = ct1qba;
+			   __entry->ct1qea = ct1qea;
+			   ),
+
+	    TP_printk("dev=%u, RCL 0x%08x..0x%08x",
+		      __entry->dev,
+		      __entry->ct1qba,
+		      __entry->ct1qea)
+);
+
 TRACE_EVENT(v3d_submit_cl,
 	    TP_PROTO(struct drm_device *dev, bool is_render,
 		     uint64_t seqno,
@@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl,
 		      __entry->ctnqea)
 );
 
+TRACE_EVENT(v3d_bcl_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_rcl_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_tfu_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_submit_tfu_ioctl,
+	    TP_PROTO(struct drm_device *dev, u32 iia),
+	    TP_ARGS(dev, iia),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, iia)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->iia = iia;
+			   ),
+
+	    TP_printk("dev=%u, IIA 0x%08x",
+		      __entry->dev,
+		      __entry->iia)
+);
+
+TRACE_EVENT(v3d_submit_tfu,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
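These tracepoints can be enabled at runtime through tracefs. A minimal
userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing:

#include <stdio.h>

static int enable_v3d_event(const char *event)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/v3d/%s/enable", event);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1", f);
	fclose(f);
	return 0;
}

int main(void)
{
	enable_v3d_event("v3d_submit_tfu");
	enable_v3d_event("v3d_tfu_irq");
	return 0;
}
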
 TRACE_EVENT(v3d_reset_begin,
 	    TP_PROTO(struct drm_device *dev),
 	    TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1f1780c..f6f5cd80 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,6 +33,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
@@ -308,6 +309,8 @@ static void vc4_drm_unbind(struct device *dev)
 
 	drm_dev_unregister(drm);
 
+	drm_atomic_helper_shutdown(drm);
+
 	drm_mode_config_cleanup(drm);
 
 	drm_atomic_private_obj_fini(&vc4->ctm_manager);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index bd6ef1f..4f87b03 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -338,6 +338,7 @@ struct vc4_plane_state {
 	u32 pos0_offset;
 	u32 pos2_offset;
 	u32 ptr0_offset;
+	u32 lbm_offset;
 
 	/* Offset where the plane's dlist was last stored in the
 	 * hardware at vc4_crtc_atomic_flush() time.
@@ -369,6 +370,11 @@ struct vc4_plane_state {
 	 * to enable background color fill.
 	 */
 	bool needs_bg_fill;
+
+	/* Mark the dlist as initialized. Useful to avoid initializing it twice
+	 * when async update is not possible.
+	 */
+	bool dlist_initialized;
 };
 
 static inline struct vc4_plane_state *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 5b22e99..aea2b8d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -635,7 +635,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = to_vc4_bo(&exec->bo[i]->base);
 
-		ret = reservation_object_reserve_shared(bo->resv);
+		ret = reservation_object_reserve_shared(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
 	exec->fence = &fence->base;
 
 	if (out_sync)
-		drm_syncobj_replace_fence(out_sync, 0, exec->fence);
+		drm_syncobj_replace_fence(out_sync, exec->fence);
 
 	vc4_update_bo_seqnos(exec, seqno);
 
@@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 	if (args->in_sync) {
 		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
-					     0, &in_fence);
+					     0, 0, &in_fence);
 		if (ret)
 			goto fail;
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c6635f2..75db62c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -129,12 +129,12 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
 
 static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
 {
-	if (dst > src)
-		return VC4_SCALING_PPF;
-	else if (dst < src)
-		return VC4_SCALING_TPZ;
-	else
+	if (dst == src)
 		return VC4_SCALING_NONE;
+	if (3 * dst >= 2 * src)
+		return VC4_SCALING_PPF;
+	else
+		return VC4_SCALING_TPZ;
 }
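
A worked example of the new threshold: the PPF (polyphase) filter now also
covers mild downscaling, down to a 2/3 ratio, while TPZ (trapezoidal) is
reserved for heavier downscaling. Standalone arithmetic check:

#include <stdio.h>

enum scaling { NONE, PPF, TPZ };

static enum scaling mode(unsigned int src, unsigned int dst)
{
	if (dst == src)
		return NONE;
	return (3 * dst >= 2 * src) ? PPF : TPZ;
}

int main(void)
{
	printf("%d\n", mode(100, 100)); /* 0: NONE, 1:1 */
	printf("%d\n", mode(100, 70));  /* 1: PPF, 210 >= 200 */
	printf("%d\n", mode(100, 50));  /* 2: TPZ, 150 < 200 */
	return 0;
}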
 
 static bool plane_enabled(struct drm_plane_state *state)
@@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
 		return NULL;
 
 	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+	vc4_state->dlist_initialized = 0;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
 
@@ -259,37 +260,51 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
 
 static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 {
-	struct drm_plane *plane = state->plane;
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
 	u32 subpixel_src_mask = (1 << 16) - 1;
 	u32 format = fb->format->format;
 	int num_planes = fb->format->num_planes;
-	u32 h_subsample = 1;
-	u32 v_subsample = 1;
-	int i;
+	struct drm_crtc_state *crtc_state;
+	u32 h_subsample, v_subsample;
+	int i, ret;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+							state->crtc);
+	if (!crtc_state) {
+		DRM_DEBUG_KMS("Invalid crtc state\n");
+		return -EINVAL;
+	}
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
+						  INT_MAX, true, true);
+	if (ret)
+		return ret;
+
+	h_subsample = drm_format_horz_chroma_subsampling(format);
+	v_subsample = drm_format_vert_chroma_subsampling(format);
 
 	for (i = 0; i < num_planes; i++)
 		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
 
 	/* We don't support subpixel source positioning for scaling. */
-	if ((state->src_x & subpixel_src_mask) ||
-	    (state->src_y & subpixel_src_mask) ||
-	    (state->src_w & subpixel_src_mask) ||
-	    (state->src_h & subpixel_src_mask)) {
+	if ((state->src.x1 & subpixel_src_mask) ||
+	    (state->src.x2 & subpixel_src_mask) ||
+	    (state->src.y1 & subpixel_src_mask) ||
+	    (state->src.y2 & subpixel_src_mask)) {
 		return -EINVAL;
 	}
 
-	vc4_state->src_x = state->src_x >> 16;
-	vc4_state->src_y = state->src_y >> 16;
-	vc4_state->src_w[0] = state->src_w >> 16;
-	vc4_state->src_h[0] = state->src_h >> 16;
+	vc4_state->src_x = state->src.x1 >> 16;
+	vc4_state->src_y = state->src.y1 >> 16;
+	vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
+	vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
 
-	vc4_state->crtc_x = state->crtc_x;
-	vc4_state->crtc_y = state->crtc_y;
-	vc4_state->crtc_w = state->crtc_w;
-	vc4_state->crtc_h = state->crtc_h;
+	vc4_state->crtc_x = state->dst.x1;
+	vc4_state->crtc_y = state->dst.y1;
+	vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
+	vc4_state->crtc_h = state->dst.y2 - state->dst.y1;
 
 	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
 						       vc4_state->crtc_w);
@@ -302,8 +317,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	if (num_planes > 1) {
 		vc4_state->is_yuv = true;
 
-		h_subsample = drm_format_horz_chroma_subsampling(format);
-		v_subsample = drm_format_vert_chroma_subsampling(format);
 		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
 		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
 
@@ -314,52 +327,20 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 			vc4_get_scaling_mode(vc4_state->src_h[1],
 					     vc4_state->crtc_h);
 
-		/* YUV conversion requires that horizontal scaling be enabled,
-		 * even on a plane that's otherwise 1:1. Looks like only PPF
-		 * works in that case, so let's pick that one.
+		/* YUV conversion requires that horizontal scaling be enabled
+		 * on the UV plane even if vc4_get_scaling_mode() returned
+		 * VC4_SCALING_NONE (which can happen when the down-scaling
+		 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+		 * case.
 		 */
-		if (vc4_state->is_unity)
-			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+		if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+			vc4_state->x_scaling[1] = VC4_SCALING_PPF;
 	} else {
+		vc4_state->is_yuv = false;
 		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
 		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
 
-	/* No configuring scaling on the cursor plane, since it gets
-	   non-vblank-synced updates, and scaling requires
-	   LBM changes which have to be vblank-synced.
-	 */
-	if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
-		return -EINVAL;
-
-	/* Clamp the on-screen start x/y to 0.  The hardware doesn't
-	 * support negative y, and negative x wastes bandwidth.
-	 */
-	if (vc4_state->crtc_x < 0) {
-		for (i = 0; i < num_planes; i++) {
-			u32 cpp = fb->format->cpp[i];
-			u32 subs = ((i == 0) ? 1 : h_subsample);
-
-			vc4_state->offsets[i] += (cpp *
-						  (-vc4_state->crtc_x) / subs);
-		}
-		vc4_state->src_w[0] += vc4_state->crtc_x;
-		vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
-		vc4_state->crtc_x = 0;
-	}
-
-	if (vc4_state->crtc_y < 0) {
-		for (i = 0; i < num_planes; i++) {
-			u32 subs = ((i == 0) ? 1 : v_subsample);
-
-			vc4_state->offsets[i] += (fb->pitches[i] *
-						  (-vc4_state->crtc_y) / subs);
-		}
-		vc4_state->src_h[0] += vc4_state->crtc_y;
-		vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
-		vc4_state->crtc_y = 0;
-	}
-
 	return 0;
 }
 
@@ -400,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
 	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
 	u32 lbm;
 
+	/* LBM is not needed when there's no vertical scaling. */
+	if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+	    vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+		return 0;
+
 	if (!vc4_state->is_yuv) {
-		if (vc4_state->is_unity)
-			return 0;
-		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+		if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
 			lbm = pix_per_line * 8;
 		else {
 			/* In special cases, this multiplier might be 12. */
@@ -454,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
 	}
 }
 
+static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	unsigned long irqflags;
+	u32 lbm_size;
+
+	lbm_size = vc4_lbm_size(state);
+	if (!lbm_size)
+		return 0;
+
+	if (WARN_ON(!vc4_state->lbm_offset))
+		return -EINVAL;
+
+	/* Allocate the LBM memory that the HVS will use for temporary
+	 * storage due to our scaling/format conversion.
+	 */
+	if (!vc4_state->lbm.allocated) {
+		int ret;
+
+		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+		ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+						 &vc4_state->lbm,
+						 lbm_size, 32, 0, 0);
+		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+
+		if (ret)
+			return ret;
+	} else {
+		WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+	}
+
+	vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
+
+	return 0;
+}
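
The allocate-once pattern above in miniature (userspace sketch; the 0x100
start address is invented): the LBM node persists across repeated checks of
the same state, and a size change at that point would be a driver bug.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct lbm_node { bool allocated; unsigned int size, start; };

static int allocate_lbm(struct lbm_node *n, unsigned int size)
{
	if (!size)
		return 0;		/* no vertical scaling: no LBM */
	if (!n->allocated) {
		/* drm_mm_insert_node_generic() in the driver */
		n->allocated = true;
		n->size = size;
		n->start = 0x100;	/* wherever the allocator decided */
	} else {
		assert(size == n->size);	/* WARN_ON_ONCE upstream */
	}
	return 0;
}

int main(void)
{
	struct lbm_node n = { 0 };

	allocate_lbm(&n, 4096);
	allocate_lbm(&n, 4096);	/* re-check: reused, not reallocated */
	printf("lbm at 0x%x, %u bytes\n", n.start, n.size);
	return 0;
}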
+
 /* Writes out a full display list for an active plane to the plane's
  * private dlist state.
  */
@@ -467,37 +488,21 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
 	u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
 	int num_planes = drm_format_num_planes(format->drm);
+	u32 h_subsample, v_subsample;
 	bool mix_plane_alpha;
 	bool covers_screen;
 	u32 scl0, scl1, pitch0;
-	u32 lbm_size, tiling;
-	unsigned long irqflags;
+	u32 tiling;
 	u32 hvs_format = format->hvs;
 	int ret, i;
 
+	if (vc4_state->dlist_initialized)
+		return 0;
+
 	ret = vc4_plane_setup_clipping_and_scaling(state);
 	if (ret)
 		return ret;
 
-	/* Allocate the LBM memory that the HVS will use for temporary
-	 * storage due to our scaling/format conversion.
-	 */
-	lbm_size = vc4_lbm_size(state);
-	if (lbm_size) {
-		if (!vc4_state->lbm.allocated) {
-			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
-							 &vc4_state->lbm,
-							 lbm_size, 32, 0, 0);
-			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
-		} else {
-			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
-		}
-	}
-
-	if (ret)
-		return ret;
-
 	/* SCL1 is used for Cb/Cr scaling of planar formats.  For RGB
 	 * and 4:4:4, scl1 should be set to scl0 so both channels of
 	 * the scaler do the same thing.  For YUV, the Y plane needs
@@ -512,26 +517,77 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		scl1 = vc4_get_scl_field(state, 0);
 	}
 
+	h_subsample = drm_format_horz_chroma_subsampling(format->drm);
+	v_subsample = drm_format_vert_chroma_subsampling(format->drm);
+
 	switch (base_format_mod) {
 	case DRM_FORMAT_MOD_LINEAR:
 		tiling = SCALER_CTL0_TILING_LINEAR;
 		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
+
+		/* Adjust the base pointer to the first pixel to be scanned
+		 * out.
+		 */
+		for (i = 0; i < num_planes; i++) {
+			vc4_state->offsets[i] += vc4_state->src_y /
+						 (i ? v_subsample : 1) *
+						 fb->pitches[i];
+			vc4_state->offsets[i] += vc4_state->src_x /
+						 (i ? h_subsample : 1) *
+						 fb->format->cpp[i];
+		}
+
 		break;
 
 	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
-		/* For T-tiled, the FB pitch is "how many bytes from
-		 * one row to the next, such that pitch * tile_h ==
-		 * tile_size * tiles_per_row."
-		 */
 		u32 tile_size_shift = 12; /* T tiles are 4kb */
+		/* Whole-tile offsets, mostly for setting the pitch. */
+		u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
 		u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
+		u32 tile_w_mask = (1 << tile_w_shift) - 1;
+		/* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
+		 * the height (in pixels) of a 4k tile.
+		 */
+		u32 tile_h_mask = (2 << tile_h_shift) - 1;
+		/* For T-tiled, the FB pitch is "how many bytes from one row to
+		 * the next, such that
+		 *
+		 *	pitch * tile_h == tile_size * tiles_per_row
+		 */
 		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
+		u32 tiles_l = vc4_state->src_x >> tile_w_shift;
+		u32 tiles_r = tiles_w - tiles_l;
+		u32 tiles_t = vc4_state->src_y >> tile_h_shift;
+		/* Intra-tile offsets, which modify the base address (the
+		 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
+		 * base address).
+		 */
+		u32 tile_y = (vc4_state->src_y >> 4) & 1;
+		u32 subtile_y = (vc4_state->src_y >> 2) & 3;
+		u32 utile_y = vc4_state->src_y & 3;
+		u32 x_off = vc4_state->src_x & tile_w_mask;
+		u32 y_off = vc4_state->src_y & tile_h_mask;
 
 		tiling = SCALER_CTL0_TILING_256B_OR_T;
+		pitch0 = (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
+			  VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
+			  VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
+			  VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
+		vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
+		vc4_state->offsets[0] += subtile_y << 8;
+		vc4_state->offsets[0] += utile_y << 4;
 
-		pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
-			  VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
-			  VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
+		/* Rows of tiles alternate left-to-right and right-to-left. */
+		if (tiles_t & 1) {
+			pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
+			vc4_state->offsets[0] += (tiles_w - tiles_l) <<
+						 tile_size_shift;
+			vc4_state->offsets[0] -= (1 + !tile_y) << 10;
+		} else {
+			vc4_state->offsets[0] += tiles_l << tile_size_shift;
+			vc4_state->offsets[0] += tile_y << 10;
+		}
+
 		break;
 	}
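
To make the tile walk concrete, here is the same arithmetic run standalone
for an assumed 32bpp framebuffer 256 pixels wide (8 tiles of 32x32 pixels,
4KB each) with the source window at (100, 70):

#include <stdio.h>

int main(void)
{
	unsigned int tile_size_shift = 12, tile_w_shift = 5, tile_h_shift = 5;
	unsigned int tile_w_mask = (1 << tile_w_shift) - 1;
	unsigned int tile_h_mask = (2 << tile_h_shift) - 1;
	unsigned int pitch = 1024;	/* pitch * 32 == 4096 * tiles_w */
	unsigned int src_x = 100, src_y = 70, offset = 0;
	unsigned int tiles_w = pitch >> (tile_size_shift - tile_h_shift); /* 8 */
	unsigned int tiles_l = src_x >> tile_w_shift;	/* 3 */
	unsigned int tiles_t = src_y >> tile_h_shift;	/* 2 */
	unsigned int tile_y = (src_y >> 4) & 1;
	unsigned int subtile_y = (src_y >> 2) & 3;
	unsigned int utile_y = src_y & 3;

	offset += tiles_t * (tiles_w << tile_size_shift); /* whole rows above */
	offset += subtile_y << 8;
	offset += utile_y << 4;
	if (tiles_t & 1) {		/* odd tile rows scan right-to-left */
		offset += (tiles_w - tiles_l) << tile_size_shift;
		offset -= (1 + !tile_y) << 10;
	} else {
		offset += tiles_l << tile_size_shift;
		offset += tile_y << 10;
	}

	printf("x_off=%u y_off=%u offset=%u\n",
	       src_x & tile_w_mask, src_y & tile_h_mask, offset); /* 4 6 78112 */
	return 0;
}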
 
@@ -667,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
 	}
 
+	vc4_state->lbm_offset = 0;
+
 	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
 	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
 	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
 	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
-		/* LBM Base Address. */
+		/* Reserve a slot for the LBM Base Address. The real value will
+		 * be set when calling vc4_plane_allocate_lbm().
+		 */
 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
-		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
-			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
-		}
+		    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+			vc4_state->lbm_offset = vc4_state->dlist_count++;
 
 		if (num_planes > 1) {
 			/* Emit Cb/Cr as channel 0 and Y as channel
@@ -721,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
 				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;
 
+	/* Flag the dlist as initialized to avoid checking it twice in case
+	 * the async update check already called vc4_plane_mode_set() and
+	 * decided to fall back to a sync update because an async update was
+	 * not possible.
+	 */
+	vc4_state->dlist_initialized = 1;
+
 	return 0;
 }
 
@@ -735,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
 				  struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	int ret;
 
 	vc4_state->dlist_count = 0;
 
-	if (plane_enabled(state))
-		return vc4_plane_mode_set(plane, state);
-	else
+	if (!plane_enabled(state))
 		return 0;
+
+	ret = vc4_plane_mode_set(plane, state);
+	if (ret)
+		return ret;
+
+	return vc4_plane_allocate_lbm(state);
 }
 
 static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -809,30 +880,50 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 {
 	struct vc4_plane_state *vc4_state, *new_vc4_state;
 
-	if (plane->state->fb != state->fb) {
-		vc4_plane_async_set_fb(plane, state->fb);
-		drm_atomic_set_fb_for_plane(plane->state, state->fb);
-	}
-
-	/* Set the cursor's position on the screen.  This is the
-	 * expected change from the drm_mode_cursor_universal()
-	 * helper.
-	 */
+	drm_atomic_set_fb_for_plane(plane->state, state->fb);
 	plane->state->crtc_x = state->crtc_x;
 	plane->state->crtc_y = state->crtc_y;
-
-	/* Allow changing the start position within the cursor BO, if
-	 * that matters.
-	 */
+	plane->state->crtc_w = state->crtc_w;
+	plane->state->crtc_h = state->crtc_h;
 	plane->state->src_x = state->src_x;
 	plane->state->src_y = state->src_y;
-
-	/* Update the display list based on the new crtc_x/y. */
-	vc4_plane_atomic_check(plane, state);
+	plane->state->src_w = state->src_w;
+	plane->state->src_h = state->src_h;
+	plane->state->alpha = state->alpha;
+	plane->state->pixel_blend_mode = state->pixel_blend_mode;
+	plane->state->rotation = state->rotation;
+	plane->state->zpos = state->zpos;
+	plane->state->normalized_zpos = state->normalized_zpos;
+	plane->state->color_encoding = state->color_encoding;
+	plane->state->color_range = state->color_range;
+	plane->state->src = state->src;
+	plane->state->dst = state->dst;
+	plane->state->visible = state->visible;
 
 	new_vc4_state = to_vc4_plane_state(state);
 	vc4_state = to_vc4_plane_state(plane->state);
 
+	vc4_state->crtc_x = new_vc4_state->crtc_x;
+	vc4_state->crtc_y = new_vc4_state->crtc_y;
+	vc4_state->crtc_h = new_vc4_state->crtc_h;
+	vc4_state->crtc_w = new_vc4_state->crtc_w;
+	vc4_state->src_x = new_vc4_state->src_x;
+	vc4_state->src_y = new_vc4_state->src_y;
+	memcpy(vc4_state->src_w, new_vc4_state->src_w,
+	       sizeof(vc4_state->src_w));
+	memcpy(vc4_state->src_h, new_vc4_state->src_h,
+	       sizeof(vc4_state->src_h));
+	memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
+	       sizeof(vc4_state->x_scaling));
+	memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
+	       sizeof(vc4_state->y_scaling));
+	vc4_state->is_unity = new_vc4_state->is_unity;
+	vc4_state->is_yuv = new_vc4_state->is_yuv;
+	memcpy(vc4_state->offsets, new_vc4_state->offsets,
+	       sizeof(vc4_state->offsets));
+	vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
+
 	/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
 	vc4_state->dlist[vc4_state->pos0_offset] =
 		new_vc4_state->dlist[vc4_state->pos0_offset];
@@ -856,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 static int vc4_plane_atomic_async_check(struct drm_plane *plane,
 					struct drm_plane_state *state)
 {
-	/* No configuring new scaling in the fast path. */
-	if (plane->state->crtc_w != state->crtc_w ||
-	    plane->state->crtc_h != state->crtc_h ||
-	    plane->state->src_w != state->src_w ||
-	    plane->state->src_h != state->src_h)
+	struct vc4_plane_state *old_vc4_state, *new_vc4_state;
+	int ret;
+	u32 i;
+
+	ret = vc4_plane_mode_set(plane, state);
+	if (ret)
+		return ret;
+
+	old_vc4_state = to_vc4_plane_state(plane->state);
+	new_vc4_state = to_vc4_plane_state(state);
+	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
+	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
+	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
+	    old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+	    vc4_lbm_size(plane->state) != vc4_lbm_size(state))
 		return -EINVAL;
 
+	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update;
+	 * if anything else has changed, fall back to a sync update.
+	 */
+	for (i = 0; i < new_vc4_state->dlist_count; i++) {
+		if (i == new_vc4_state->pos0_offset ||
+		    i == new_vc4_state->pos2_offset ||
+		    i == new_vc4_state->ptr0_offset ||
+		    (new_vc4_state->lbm_offset &&
+		     i == new_vc4_state->lbm_offset))
+			continue;
+
+		if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
+			return -EINVAL;
+	}
+
 	return 0;
 }
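
The rule in that loop, restated as a standalone predicate (sketch; the
driver compares the u32 dlist words in place): two dlists are
async-compatible only when every word matches except pos0, pos2, ptr0 and,
when present, the LBM slot.

#include <stdbool.h>
#include <stdio.h>

static bool async_compatible(const unsigned int *a, const unsigned int *b,
			     unsigned int count, unsigned int pos0,
			     unsigned int pos2, unsigned int ptr0,
			     unsigned int lbm)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (i == pos0 || i == pos2 || i == ptr0 ||
		    (lbm && i == lbm))	/* lbm == 0 means "no LBM slot" */
			continue;
		if (a[i] != b[i])
			return false;
	}
	return true;
}

int main(void)
{
	unsigned int a[] = { 1, 2, 3, 4 }, b[] = { 1, 9, 3, 4 };

	/* word 1 is pos0 here, so this change stays on the fast path */
	printf("%d\n", async_compatible(a, b, 4, 1, 2, 0, 0));
	return 0;
}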
 
@@ -914,7 +1030,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
 
 static void vc4_plane_destroy(struct drm_plane *plane)
 {
-	drm_plane_helper_disable(plane, NULL);
 	drm_plane_cleanup(plane);
 }
 
@@ -980,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 	struct drm_plane *plane = NULL;
 	struct vc4_plane *vc4_plane;
 	u32 formats[ARRAY_SIZE(hvs_formats)];
-	u32 num_formats = 0;
 	int ret = 0;
 	unsigned i;
 	static const uint64_t modifiers[] = {
@@ -997,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 	if (!vc4_plane)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
-		/* Don't allow YUV in cursor planes, since that means
-		 * tuning on the scaler, which we don't allow for the
-		 * cursor.
-		 */
-		if (type != DRM_PLANE_TYPE_CURSOR ||
-		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
-			formats[num_formats++] = hvs_formats[i].drm;
-		}
-	}
+	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+		formats[i] = hvs_formats[i].drm;
+
 	plane = &vc4_plane->base;
 	ret = drm_universal_plane_init(dev, plane, 0,
 				       &vc4_plane_funcs,
-				       formats, num_formats,
+				       formats, ARRAY_SIZE(formats),
 				       modifiers, type, NULL);
 
 	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d6864fa..9310880 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1037,14 +1037,18 @@ enum hvs_pixel_format {
 #define SCALER_TILE_HEIGHT_MASK			VC4_MASK(15, 0)
 #define SCALER_TILE_HEIGHT_SHIFT		0
 
+/* Common PITCH0 fields */
+#define SCALER_PITCH0_SINK_PIX_MASK		VC4_MASK(31, 26)
+#define SCALER_PITCH0_SINK_PIX_SHIFT		26
+
 /* PITCH0 fields for T-tiled. */
 #define SCALER_PITCH0_TILE_WIDTH_L_MASK		VC4_MASK(22, 16)
 #define SCALER_PITCH0_TILE_WIDTH_L_SHIFT	16
 #define SCALER_PITCH0_TILE_LINE_DIR		BIT(15)
 #define SCALER_PITCH0_TILE_INITIAL_LINE_DIR	BIT(14)
 /* Y offset within a tile. */
-#define SCALER_PITCH0_TILE_Y_OFFSET_MASK	VC4_MASK(13, 7)
-#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT	7
+#define SCALER_PITCH0_TILE_Y_OFFSET_MASK	VC4_MASK(13, 8)
+#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT	8
 #define SCALER_PITCH0_TILE_WIDTH_R_MASK		VC4_MASK(6, 0)
 #define SCALER_PITCH0_TILE_WIDTH_R_SHIFT	0
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index ec6af8b..5930fac 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -431,7 +431,8 @@ static void vgem_release(struct drm_device *dev)
 }
 
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME |
+					  DRIVER_RENDER,
 	.release			= vgem_release,
 	.open				= vgem_open,
 	.postclose			= vgem_postclose,
@@ -471,31 +472,31 @@ static int __init vgem_init(void)
 	if (!vgem_device)
 		return -ENOMEM;
 
-	ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
-	if (ret)
-		goto out_free;
-
 	vgem_device->platform =
 		platform_device_register_simple("vgem", -1, NULL, 0);
 	if (IS_ERR(vgem_device->platform)) {
 		ret = PTR_ERR(vgem_device->platform);
-		goto out_fini;
+		goto out_free;
 	}
 
 	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
 				     DMA_BIT_MASK(64));
+	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
+			   &vgem_device->platform->dev);
+	if (ret)
+		goto out_unregister;
 
 	/* Final step: expose the device/driver to userspace */
 	ret  = drm_dev_register(&vgem_device->drm, 0);
 	if (ret)
-		goto out_unregister;
+		goto out_fini;
 
 	return 0;
 
-out_unregister:
-	platform_device_unregister(vgem_device->platform);
 out_fini:
 	drm_dev_fini(&vgem_device->drm);
+out_unregister:
+	platform_device_unregister(vgem_device->platform);
 out_free:
 	kfree(vgem_device);
 	return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index e6ee713..c1c420a 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -180,7 +180,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	reservation_object_lock(resv, NULL);
 	if (arg->flags & VGEM_FENCE_WRITE)
 		reservation_object_add_excl_fence(resv, fence);
-	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+	else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
 		reservation_object_add_shared_fence(resv, fence);
 	reservation_object_unlock(resv);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8f8fed4..b5580b1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -169,6 +169,12 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 	struct drm_display_mode *mode = NULL;
 	int count, width, height;
 
+	if (output->edid) {
+		count = drm_add_edid_modes(connector, output->edid);
+		if (count)
+			return count;
+	}
+
 	width  = le32_to_cpu(output->info.r.width);
 	height = le32_to_cpu(output->info.r.height);
 	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
@@ -287,6 +293,8 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+	if (vgdev->has_edid)
+		drm_connector_attach_edid_property(connector);
 
 	drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
 			 DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -378,6 +386,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
 
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 {
+	int i;
+
+	for (i = 0 ; i < vgdev->num_scanouts; ++i)
+		kfree(vgdev->outputs[i].edid);
 	virtio_gpu_fbdev_fini(vgdev);
 	drm_mode_config_cleanup(vgdev->ddev);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 757ca28..0887e0b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -53,6 +53,37 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
 									  0,
 									  "virtiodrmfb");
 
+		/*
+		 * Normally the drm_dev_set_unique() call is done by core DRM.
+		 * The following comment covers why virtio cannot rely on it.
+		 *
+		 * Unlike the other virtual GPU drivers, virtio abstracts the
+		 * underlying bus type by using struct virtio_device.
+		 *
+		 * Hence the dev_is_pci() check, used in core DRM, will fail
+		 * and the unique returned will be the virtio_device "virtio0",
+		 * while a "pci:..." one is required.
+		 *
+		 * A few other ideas were considered:
+		 * - Extend the dev_is_pci() check [in drm_set_busid] to
+		 *   consider virtio.
+		 *   Seems like a bigger hack than what we have already.
+		 *
+		 * - Point drm_device::dev to the parent of the virtio_device
+		 *   Semantic changes:
+		 *   * Using the wrong device for i2c, framebuffer_alloc and
+		 *     prime import.
+		 *   Visual changes:
+		 *   * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
+		 *     will print the wrong information.
+		 *
+		 * We could address the latter issues, by introducing
+		 * drm_device::bus_dev, ... which would be used solely for this.
+		 *
+		 * So for the moment keep things as-is, with a bulky comment
+		 * for the next person who feels like removing this
+		 * drm_dev_set_unique() quirk.
+		 */
 		snprintf(unique, sizeof(unique), "pci:%s", pname);
 		ret = drm_dev_set_unique(dev, unique);
 		if (ret)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index d9287c1..f7f32a8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -80,6 +80,7 @@ static unsigned int features[] = {
 	 */
 	VIRTIO_GPU_F_VIRGL,
 #endif
+	VIRTIO_GPU_F_EDID,
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d29f0c7..1deb41d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -47,8 +47,8 @@
 #define DRIVER_DATE "0"
 
 #define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
 
 /* virtgpu_drm_bus.c */
 int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
@@ -65,6 +65,7 @@ struct virtio_gpu_object {
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
+	bool created;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
 	container_of((gobj), struct virtio_gpu_object, gem_base)
@@ -114,6 +115,7 @@ struct virtio_gpu_output {
 	struct drm_encoder enc;
 	struct virtio_gpu_display_one info;
 	struct virtio_gpu_update_cursor cursor;
+	struct edid *edid;
 	int cur_x;
 	int cur_y;
 	bool enabled;
@@ -130,6 +132,7 @@ struct virtio_gpu_framebuffer {
 	int x1, y1, x2, y2; /* dirty rect */
 	spinlock_t dirty_lock;
 	uint32_t hw_res_handle;
+	struct virtio_gpu_fence *fence;
 };
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
@@ -142,9 +145,6 @@ struct virtio_gpu_fbdev {
 };
 
 struct virtio_gpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	bool				mem_global_referenced;
 	struct ttm_bo_device		bdev;
 };
 
@@ -190,8 +190,7 @@ struct virtio_gpu_device {
 	struct kmem_cache *vbufs;
 	bool vqs_ready;
 
-	struct idr	resource_idr;
-	spinlock_t resource_idr_lock;
+	struct ida	resource_ida;
 
 	wait_queue_head_t resp_wq;
 	/* current display info */
@@ -200,10 +199,10 @@ struct virtio_gpu_device {
 
 	struct virtio_gpu_fence_driver fence_drv;
 
-	struct idr	ctx_id_idr;
-	spinlock_t ctx_id_idr_lock;
+	struct ida	ctx_id_ida;
 
 	bool has_virgl_3d;
+	bool has_edid;
 
 	struct work_struct config_changed_work;
 
@@ -259,11 +258,8 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
 /* virtio vg */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-			       uint32_t *resid);
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
-				    uint32_t resource_id,
+				    struct virtio_gpu_object *bo,
 				    uint32_t format,
 				    uint32_t width,
 				    uint32_t height);
@@ -274,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -285,8 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     uint32_t resource_id,
-			     struct virtio_gpu_fence **fence);
+			     struct virtio_gpu_fence *fence);
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -298,6 +293,7 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 			      int idx, int version,
 			      struct virtio_gpu_drv_cap_cache **cache_p);
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 				   uint32_t nlen, const char *name);
 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -310,22 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence);
+					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence);
+				  struct virtio_gpu_object *bo,
+				  struct virtio_gpu_resource_create_3d *rc_3d);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -353,9 +349,12 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /* virtio_gpu_fence.c */
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(
+	struct virtio_gpu_device *vgdev);
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence);
+			  struct virtio_gpu_fence *fence);
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index cea749f..fb1cc8b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -214,7 +214,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct virtio_gpu_object *obj;
-	uint32_t resid, format, size;
+	uint32_t format, size;
 	int ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -231,8 +231,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	virtio_gpu_resource_id_get(vgdev, &resid);
-	virtio_gpu_cmd_create_resource(vgdev, resid, format,
+	virtio_gpu_cmd_create_resource(vgdev, obj, format,
 				       mode_cmd.width, mode_cmd.height);
 
 	ret = virtio_gpu_object_kmap(obj);
@@ -242,7 +241,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	}
 
 	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
 	if (ret)
 		goto err_obj_attach;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a..4d6826b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,28 +67,43 @@ static const struct dma_fence_ops virtio_fence_ops = {
 	.timeline_value_str  = virtio_timeline_value_str,
 };
 
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
+							GFP_ATOMIC);
+	if (!fence)
+		return fence;
+
+	fence->drv = drv;
+	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+	return fence;
+}
+
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
+{
+	if (!fence)
+		return;
+
+	dma_fence_put(&fence->f);
+}
+
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence)
+			  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	unsigned long irq_flags;
 
-	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
-	if ((*fence) == NULL)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	(*fence)->drv = drv;
-	(*fence)->seq = ++drv->sync_seq;
-	dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
-		       drv->context, (*fence)->seq);
-	dma_fence_get(&(*fence)->f);
-	list_add_tail(&(*fence)->node, &drv->fences);
+	fence->seq = ++drv->sync_seq;
+	dma_fence_get(&fence->f);
+	list_add_tail(&fence->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+	cmd_hdr->fence_id = cpu_to_le64(fence->seq);
 	return 0;
 }
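
The shape of the refactor as a userspace-style sketch: allocation is hoisted
out of emit, so a failed allocation is handled before anything is queued,
and emit only stamps the sequence number.

#include <stdio.h>
#include <stdlib.h>

struct fence { unsigned long seq; };

static unsigned long sync_seq;

static struct fence *fence_alloc(void)
{
	return calloc(1, sizeof(struct fence));	/* kzalloc in the driver */
}

static void fence_emit(struct fence *f)
{
	f->seq = ++sync_seq;	/* previously done inside the alloc path */
}

int main(void)
{
	struct fence *f = fence_alloc();

	if (!f)
		return 1;	/* fail before touching the virtqueue */
	fence_emit(f);
	printf("fence seq %lu\n", f->seq);
	free(f);
	return 0;
}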
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 82c817f..f065863 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -87,7 +87,6 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 	struct virtio_gpu_object *obj;
 	int ret;
 	uint32_t pitch;
-	uint32_t resid;
 	uint32_t format;
 
 	if (args->bpp != 32)
@@ -103,13 +102,12 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 		goto fail;
 
 	format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
-	virtio_gpu_resource_id_get(vgdev, &resid);
-	virtio_gpu_cmd_create_resource(vgdev, resid, format,
+	obj = gem_to_virtio_gpu_obj(gobj);
+	virtio_gpu_cmd_create_resource(vgdev, obj, format,
 				       args->width, args->height);
 
 	/* attach the object to the resource */
-	obj = gem_to_virtio_gpu_obj(gobj);
-	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index f16b875d..161b80f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -28,6 +28,7 @@
 #include <drm/drmP.h>
 #include <drm/virtgpu_drm.h>
 #include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/sync_file.h>
 
 #include "virtgpu_drv.h"
 
@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
 	struct drm_gem_object *gobj;
-	struct virtio_gpu_fence *fence;
+	struct virtio_gpu_fence *out_fence;
 	struct virtio_gpu_object *qobj;
 	int ret;
 	uint32_t *bo_handles = NULL;
@@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	struct ttm_validate_buffer *buflist = NULL;
 	int i;
 	struct ww_acquire_ctx ticket;
+	struct sync_file *sync_file;
+	int in_fence_fd = exbuf->fence_fd;
+	int out_fence_fd = -1;
 	void *buf;
 
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
 
+	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
+		return -EINVAL;
+
+	exbuf->fence_fd = -1;
+
+	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+		struct dma_fence *in_fence;
+
+		in_fence = sync_file_get_fence(in_fence_fd);
+
+		if (!in_fence)
+			return -EINVAL;
+
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
+		 */
+		ret = 0;
+		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+			ret = dma_fence_wait(in_fence, true);
+
+		dma_fence_put(in_fence);
+		if (ret)
+			return ret;
+	}
+
+	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0)
+			return out_fence_fd;
+	}
+
 	INIT_LIST_HEAD(&validate_list);
 	if (exbuf->num_bo_handles) {
 
@@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 					   sizeof(struct ttm_validate_buffer),
 					   GFP_KERNEL | __GFP_ZERO);
 		if (!bo_handles || !buflist) {
-			kvfree(bo_handles);
-			kvfree(buflist);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out_unused_fd;
 		}
 
 		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
 		if (copy_from_user(bo_handles, user_bo_handles,
 				   exbuf->num_bo_handles * sizeof(uint32_t))) {
 			ret = -EFAULT;
-			kvfree(bo_handles);
-			kvfree(buflist);
-			return ret;
+			goto out_unused_fd;
 		}
 
 		for (i = 0; i < exbuf->num_bo_handles; i++) {
 			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
 			if (!gobj) {
-				kvfree(bo_handles);
-				kvfree(buflist);
-				return -ENOENT;
+				ret = -ENOENT;
+				goto out_unused_fd;
 			}
 
 			qobj = gem_to_virtio_gpu_obj(gobj);
@@ -156,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			list_add(&buflist[i].head, &validate_list);
 		}
 		kvfree(bo_handles);
+		bo_handles = NULL;
 	}
 
 	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -168,22 +201,48 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		ret = PTR_ERR(buf);
 		goto out_unresv;
 	}
-	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-			      vfpriv->ctx_id, &fence);
 
-	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+	out_fence = virtio_gpu_fence_alloc(vgdev);
+	if (!out_fence) {
+		ret = -ENOMEM;
+		goto out_memdup;
+	}
+
+	if (out_fence_fd >= 0) {
+		sync_file = sync_file_create(&out_fence->f);
+		if (!sync_file) {
+			dma_fence_put(&out_fence->f);
+			ret = -ENOMEM;
+			goto out_memdup;
+		}
+
+		exbuf->fence_fd = out_fence_fd;
+		fd_install(out_fence_fd, sync_file->file);
+	}
+
+	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
+			      vfpriv->ctx_id, out_fence);
+
+	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
 
 	/* fence the command bo */
 	virtio_gpu_unref_list(&validate_list);
 	kvfree(buflist);
-	dma_fence_put(&fence->f);
 	return 0;
 
+out_memdup:
+	kfree(buf);
 out_unresv:
 	ttm_eu_backoff_reservation(&ticket, &validate_list);
 out_free:
 	virtio_gpu_unref_list(&validate_list);
+out_unused_fd:
+	kvfree(bo_handles);
 	kvfree(buflist);
+
+	if (out_fence_fd >= 0)
+		put_unused_fd(out_fence_fd);
+
 	return ret;
 }
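
From userspace, the new flags are used roughly like this (hedged sketch; the
field and flag names come from this series' uapi header,
include/uapi/drm/virtgpu_drm.h, and the include path may differ with libdrm):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

static int submit(int drm_fd, void *cmds, uint32_t size, int in_fence_fd)
{
	struct drm_virtgpu_execbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.command = (uintptr_t)cmds;
	eb.size = size;
	eb.flags = VIRTGPU_EXECBUF_FENCE_FD_IN | VIRTGPU_EXECBUF_FENCE_FD_OUT;
	eb.fence_fd = in_fence_fd;	/* consumed as the in-fence */

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
		return -1;
	return eb.fence_fd;	/* out-fence fd installed by the kernel */
}

int main(void)
{
	(void)submit;	/* illustration only: needs a real virtio-gpu fd */
	return 0;
}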
 
@@ -217,7 +276,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_resource_create *rc = data;
 	int ret;
-	uint32_t res_id;
 	struct virtio_gpu_object *qobj;
 	struct drm_gem_object *obj;
 	uint32_t handle = 0;
@@ -244,8 +302,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	INIT_LIST_HEAD(&validate_list);
 	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
 
-	virtio_gpu_resource_id_get(vgdev, &res_id);
-
 	size = rc->size;
 
 	/* allocate a single page size object */
@@ -253,17 +309,15 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		size = PAGE_SIZE;
 
 	qobj = virtio_gpu_alloc_object(dev, size, false, false);
-	if (IS_ERR(qobj)) {
-		ret = PTR_ERR(qobj);
-		goto fail_id;
-	}
+	if (IS_ERR(qobj))
+		return PTR_ERR(qobj);
 	obj = &qobj->gem_base;
 
 	if (!vgdev->has_virgl_3d) {
-		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
 					       rc->width, rc->height);
 
-		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
 	} else {
 		/* use a gem reference since unref list undoes them */
 		drm_gem_object_get(&qobj->gem_base);
@@ -276,7 +330,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 			goto fail_unref;
 		}
 
-		rc_3d.resource_id = cpu_to_le32(res_id);
+		rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
 		rc_3d.target = cpu_to_le32(rc->target);
 		rc_3d.format = cpu_to_le32(rc->format);
 		rc_3d.bind = cpu_to_le32(rc->bind);
@@ -288,17 +342,21 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
 		rc_3d.flags = cpu_to_le32(rc->flags);
 
-		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
-		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+		fence = virtio_gpu_fence_alloc(vgdev);
+		if (!fence) {
+			ret = -ENOMEM;
+			goto fail_backoff;
+		}
+
+		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
+		ret = virtio_gpu_object_attach(vgdev, qobj, fence);
 		if (ret) {
-			ttm_eu_backoff_reservation(&ticket, &validate_list);
-			goto fail_unref;
+			virtio_gpu_fence_cleanup(fence);
+			goto fail_backoff;
 		}
 		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
 	}
 
-	qobj->hw_res_handle = res_id;
-
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
 	if (ret) {
 
@@ -311,7 +369,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	}
 	drm_gem_object_put_unlocked(obj);
 
-	rc->res_handle = res_id; /* similiar to a VM address */
+	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
 	rc->bo_handle = handle;
 
 	if (vgdev->has_virgl_3d) {
@@ -319,6 +377,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		dma_fence_put(&fence->f);
 	}
 	return 0;
+fail_backoff:
+	ttm_eu_backoff_reservation(&ticket, &validate_list);
 fail_unref:
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
@@ -326,8 +386,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	}
 //fail_obj:
 //	drm_gem_object_handle_unreference_unlocked(obj);
-fail_id:
-	virtio_gpu_resource_id_put(vgdev, res_id);
 	return ret;
 }
 
@@ -383,10 +441,16 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		goto out_unres;
 
 	convert_to_hw_box(&box, &args->box);
+
+	fence = virtio_gpu_fence_alloc(vgdev);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto out_unres;
+	}
 	virtio_gpu_cmd_transfer_from_host_3d
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
-		 &box, &fence);
+		 &box, fence);
 	reservation_object_add_excl_fence(qobj->tbo.resv,
 					  &fence->f);
 
@@ -432,10 +496,15 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			(vgdev, qobj, offset,
 			 box.w, box.h, box.x, box.y, NULL);
 	} else {
+		fence = virtio_gpu_fence_alloc(vgdev);
+		if (!fence) {
+			ret = -ENOMEM;
+			goto out_unres;
+		}
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev, qobj,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &box, &fence);
+			 args->level, &box, fence);
 		reservation_object_add_excl_fence(qobj->tbo.resv,
 						  &fence->f);
 		dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 65060c0..3af6181 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
 		     events_read, &events_read);
 	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+		if (vgdev->has_edid)
+			virtio_gpu_cmd_get_edids(vgdev);
 		virtio_gpu_cmd_get_display_info(vgdev);
 		drm_helper_hpd_irq_event(vgdev->ddev);
 		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
@@ -52,39 +54,23 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		      events_clear, &events_clear);
 }
 
-static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
-				  uint32_t *resid)
+static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+				      uint32_t nlen, const char *name)
 {
-	int handle;
+	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&vgdev->ctx_id_idr_lock);
-	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
-	spin_unlock(&vgdev->ctx_id_idr_lock);
-	idr_preload_end();
-	*resid = handle;
-}
-
-static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
-	spin_lock(&vgdev->ctx_id_idr_lock);
-	idr_remove(&vgdev->ctx_id_idr, id);
-	spin_unlock(&vgdev->ctx_id_idr_lock);
-}
-
-static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
-				      uint32_t nlen, const char *name,
-				      uint32_t *ctx_id)
-{
-	virtio_gpu_ctx_id_get(vgdev, ctx_id);
-	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+	if (handle < 0)
+		return handle;
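+	/* The IDA allocates from 0 but context ids start at 1, so shift by one. */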
+	handle += 1;
+	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
+	return handle;
 }
 
 static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
 				      uint32_t ctx_id)
 {
 	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-	virtio_gpu_ctx_id_put(vgdev, ctx_id);
+	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
 }
 
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -151,10 +137,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	vgdev->dev = dev->dev;
 
 	spin_lock_init(&vgdev->display_info_lock);
-	spin_lock_init(&vgdev->ctx_id_idr_lock);
-	idr_init(&vgdev->ctx_id_idr);
-	spin_lock_init(&vgdev->resource_idr_lock);
-	idr_init(&vgdev->resource_idr);
+	ida_init(&vgdev->ctx_id_ida);
+	ida_init(&vgdev->resource_ida);
 	init_waitqueue_head(&vgdev->resp_wq);
 	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
 	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
@@ -174,6 +158,10 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 #else
 	DRM_INFO("virgl 3d acceleration not supported by guest\n");
 #endif
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+		vgdev->has_edid = true;
+		DRM_INFO("EDID support available.\n");
+	}
 
 	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
 	if (ret) {
@@ -219,6 +207,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 
 	if (num_capsets)
 		virtio_gpu_get_capsets(vgdev, num_capsets);
+	if (vgdev->has_edid)
+		virtio_gpu_cmd_get_edids(vgdev);
 	virtio_gpu_cmd_get_display_info(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);
@@ -271,7 +261,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv;
-	uint32_t id;
+	int id;
 	char dbgname[TASK_COMM_LEN];
 
 	/* can't create contexts without 3d renderer */
@@ -284,7 +274,11 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 		return -ENOMEM;
 
 	get_task_comm(dbgname, current);
-	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
+	if (id < 0) {
+		kfree(vfpriv);
+		return id;
+	}
 
 	vfpriv->ctx_id = id;
 	file->driver_priv = vfpriv;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index eca7655..f39a183 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -25,6 +25,23 @@
 
 #include "virtgpu_drv.h"
 
+static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+				       uint32_t *resid)
+{
+	int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
+	if (handle < 0)
+		return handle;
+
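+	/* The IDA allocates from 0 but resource ids are 1-based, so shift by one. */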
+	*resid = handle + 1;
+	return 0;
+}
+
+static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+	ida_free(&vgdev->resource_ida, id - 1);
+}
+
 static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct virtio_gpu_object *bo;
@@ -33,13 +50,14 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	bo = container_of(tbo, struct virtio_gpu_object, tbo);
 	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
 
-	if (bo->hw_res_handle)
+	if (bo->created)
 		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
 	if (bo->pages)
 		virtio_gpu_object_free_sg_table(bo);
 	if (bo->vmap)
 		virtio_gpu_object_kunmap(bo);
 	drm_gem_object_release(&bo->gem_base);
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 	kfree(bo);
 }
 
@@ -81,9 +99,15 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret < 0) {
+		kfree(bo);
+		return ret;
+	}
 	size = roundup(size, PAGE_SIZE);
 	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
 	if (ret != 0) {
+		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 		kfree(bo);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7..ead5c53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				      plane->state->src_h >> 16);
 }
 
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+					struct drm_plane_state *new_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo;
+
+	if (!new_state->fb)
+		return 0;
+
+	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
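+	/*
+	 * Dumb BOs need a transfer to the host on update; allocate the fence
+	 * here in prepare_fb, which may fail, so that atomic_update cannot.
+	 */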
+	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+		if (!vgfb->fence)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+					 struct drm_plane_state *old_state)
+{
+	struct virtio_gpu_framebuffer *vgfb;
+
+	if (!plane->state->fb)
+		return;
+
+	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	if (vgfb->fence)
+		virtio_gpu_fence_cleanup(vgfb->fence);
+}
+
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 					   struct drm_plane_state *old_state)
 {
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
-	struct virtio_gpu_fence *fence = NULL;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 	int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, &fence);
+			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
-							  &fence->f);
-			dma_fence_put(&fence->f);
-			fence = NULL;
+							  &vgfb->fence->f);
+			dma_fence_put(&vgfb->fence->f);
+			vgfb->fence = NULL;
 			virtio_gpu_object_unreserve(bo);
 			virtio_gpu_object_wait(bo, false);
 		}
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
+	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
 	.atomic_check		= virtio_gpu_plane_atomic_check,
 	.atomic_update		= virtio_gpu_cursor_plane_update,
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index e3152d4..4bfbf25fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
 	return vgdev;
 }
 
-static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
-{
-	struct drm_global_reference *global_ref;
-	int r;
-
-	vgdev->mman.mem_global_referenced = false;
-	global_ref = &vgdev->mman.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &virtio_gpu_ttm_mem_global_init;
-	global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM memory accounting "
-			  "subsystem.\n");
-		return r;
-	}
-
-	vgdev->mman.bo_global_ref.mem_glob =
-		vgdev->mman.mem_global_ref.object;
-	global_ref = &vgdev->mman.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&vgdev->mman.mem_global_ref);
-		return r;
-	}
-
-	vgdev->mman.mem_global_referenced = true;
-	return 0;
-}
-
-static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
-{
-	if (vgdev->mman.mem_global_referenced) {
-		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
-		drm_global_item_unref(&vgdev->mman.mem_global_ref);
-		vgdev->mman.mem_global_referenced = false;
-	}
-}
-
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv;
@@ -347,8 +291,7 @@ static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
 
 	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
 		if (bo->hw_res_handle) {
-			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
-						 NULL);
+			virtio_gpu_object_attach(vgdev, bo, NULL);
 		}
 	}
 }
@@ -383,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 {
 	int r;
 
-	r = virtio_gpu_ttm_global_init(vgdev);
-	if (r)
-		return r;
 	/* No other users of the address space, so set it to 0 */
 	r = ttm_bo_device_init(&vgdev->mman.bdev,
-			       vgdev->mman.bo_global_ref.ref.object,
 			       &virtio_gpu_bo_driver,
 			       vgdev->ddev->anon_inode->i_mapping,
 			       DRM_FILE_PAGE_OFFSET, 0);
@@ -407,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
 err_mm_init:
 	ttm_bo_device_release(&vgdev->mman.bdev);
 err_dev_init:
-	virtio_gpu_ttm_global_fini(vgdev);
 	return r;
 }
 
 void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
 {
 	ttm_bo_device_release(&vgdev->mman.bdev);
-	virtio_gpu_ttm_global_fini(vgdev);
 	DRM_INFO("virtio_gpu: ttm finalized\n");
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4e2e037..e27c4ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -38,26 +38,6 @@
 			       + MAX_INLINE_CMD_SIZE		 \
 			       + MAX_INLINE_RESP_SIZE)
 
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-				uint32_t *resid)
-{
-	int handle;
-
-	idr_preload(GFP_KERNEL);
-	spin_lock(&vgdev->resource_idr_lock);
-	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
-	spin_unlock(&vgdev->resource_idr_lock);
-	idr_preload_end();
-	*resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
-	spin_lock(&vgdev->resource_idr_lock);
-	idr_remove(&vgdev->resource_idr, id);
-	spin_unlock(&vgdev->resource_idr_lock);
-}
-
 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 {
 	struct drm_device *dev = vq->vdev->priv;
@@ -98,10 +78,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
-	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
 	if (!vbuf)
 		return ERR_PTR(-ENOMEM);
-	memset(vbuf, 0, VBUFFER_SIZE);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -319,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence **fence)
+					       struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	int rc;
@@ -388,7 +367,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 
 /* create a basic resource */
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
-				    uint32_t resource_id,
+				    struct virtio_gpu_object *bo,
 				    uint32_t format,
 				    uint32_t width,
 				    uint32_t height)
@@ -400,12 +379,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->format = cpu_to_le32(format);
 	cmd_p->width = cpu_to_le32(width);
 	cmd_p->height = cpu_to_le32(height);
 
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	bo->created = true;
 }
 
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -425,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 						  uint32_t resource_id,
-						  struct virtio_gpu_fence **fence)
+						  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -487,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -517,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 				       uint32_t resource_id,
 				       struct virtio_gpu_mem_entry *ents,
 				       uint32_t nents,
-				       struct virtio_gpu_fence **fence)
+				       struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_attach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -604,6 +584,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 	wake_up(&vgdev->resp_wq);
 }
 
+static int virtio_get_edid_block(void *data, u8 *buf,
+				 unsigned int block, size_t len)
+{
+	struct virtio_gpu_resp_edid *resp = data;
+	size_t start = block * EDID_LENGTH;
+
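+	/* Refuse reads beyond the EDID payload returned by the host. */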
+	if (start + len > le32_to_cpu(resp->size))
+		return -1;
+	memcpy(buf, resp->edid + start, len);
+	return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_cmd_get_edid *cmd =
+		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+	struct virtio_gpu_resp_edid *resp =
+		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+	uint32_t scanout = le32_to_cpu(cmd->scanout);
+	struct virtio_gpu_output *output;
+	struct edid *new_edid, *old_edid;
+
+	if (scanout >= vgdev->num_scanouts)
+		return;
+	output = vgdev->outputs + scanout;
+
+	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
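+	/*
+	 * Publish the new EDID under the lock; free the old one after
+	 * dropping it.
+	 */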
+	spin_lock(&vgdev->display_info_lock);
+	old_edid = output->edid;
+	output->edid = new_edid;
+	drm_connector_update_edid_property(&output->conn, output->edid);
+	spin_unlock(&vgdev->display_info_lock);
+
+	kfree(old_edid);
+	wake_up(&vgdev->resp_wq);
+}
+
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 {
 	struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -706,6 +725,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	return 0;
 }
 
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_cmd_get_edid *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+	int scanout;
+
+	if (WARN_ON(!vgdev->has_edid))
+		return -EINVAL;
+
+	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+				   GFP_KERNEL);
+		if (!resp_buf)
+			return -ENOMEM;
+
+		cmd_p = virtio_gpu_alloc_cmd_resp
+			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+			 resp_buf);
+		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+		cmd_p->scanout = cpu_to_le32(scanout);
+		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	}
+
+	return 0;
+}
+
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
 				   uint32_t nlen, const char *name)
 {
@@ -772,8 +819,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence)
+				  struct virtio_gpu_object *bo,
+				  struct virtio_gpu_resource_create_3d *rc_3d)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -785,7 +832,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 	cmd_p->hdr.flags = 0;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	bo->created = true;
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
@@ -793,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence)
+					  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -841,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -861,14 +909,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     uint32_t resource_id,
-			     struct virtio_gpu_fence **fence)
+			     struct virtio_gpu_fence *fence)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
 	int si, nents;
 
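+	/* Nothing to attach if the host-side resource has not been created yet. */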
+	if (!obj->created)
+		return 0;
+
 	if (!obj->pages) {
 		int ret;
 
@@ -902,10 +952,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 		ents[si].padding = 0;
 	}
 
-	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
 					       ents, nents,
 					       fence);
-	obj->hw_res_handle = resource_id;
 	return 0;
 }
 
@@ -913,11 +962,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_fence *fence;
 
 	if (use_dma_api && obj->mapped) {
+		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 		/* detach backing and wait for the host to process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
 		dma_fence_wait(&fence->f, true);
 		dma_fence_put(&fence->f);
 
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 07cfde1..8308787 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -68,7 +68,6 @@ static struct drm_driver vkms_driver = {
 	.release		= vkms_release,
 	.fops			= &vkms_driver_fops,
 	.dumb_create		= vkms_dumb_create,
-	.dumb_map_offset	= vkms_dumb_map,
 	.gem_vm_ops		= &vkms_gem_vm_ops,
 	.gem_free_object_unlocked = vkms_gem_free_object,
 	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
@@ -108,17 +107,18 @@ static int __init vkms_init(void)
 	if (!vkms_device)
 		return -ENOMEM;
 
-	ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
-	if (ret)
-		goto out_free;
-
 	vkms_device->platform =
 		platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
 	if (IS_ERR(vkms_device->platform)) {
 		ret = PTR_ERR(vkms_device->platform);
-		goto out_fini;
+		goto out_free;
 	}
 
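+	/*
+	 * Register the platform device first: drm_dev_init() needs it as the
+	 * parent device.
+	 */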
+	ret = drm_dev_init(&vkms_device->drm, &vkms_driver,
+			   &vkms_device->platform->dev);
+	if (ret)
+		goto out_unregister;
+
 	vkms_device->drm.irq_enabled = true;
 
 	ret = drm_vblank_init(&vkms_device->drm, 1);
@@ -129,20 +129,20 @@ static int __init vkms_init(void)
 
 	ret = vkms_modeset_init(vkms_device);
 	if (ret)
-		goto out_unregister;
+		goto out_fini;
 
 	ret = drm_dev_register(&vkms_device->drm, 0);
 	if (ret)
-		goto out_unregister;
+		goto out_fini;
 
 	return 0;
 
-out_unregister:
-	platform_device_unregister(vkms_device->platform);
-
 out_fini:
 	drm_dev_fini(&vkms_device->drm);
 
+out_unregister:
+	platform_device_unregister(vkms_device->platform);
+
 out_free:
 	kfree(vkms_device);
 	return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1c93990..e4469cd 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -127,9 +127,6 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 		     struct drm_mode_create_dumb *args);
 
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
-		  u32 handle, u64 *offset);
-
 void vkms_gem_free_object(struct drm_gem_object *obj);
 
 int vkms_gem_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index d04e988..80311da 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -153,32 +153,6 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return 0;
 }
 
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
-		  u32 handle, u64 *offset)
-{
-	struct drm_gem_object *obj;
-	int ret;
-
-	obj = drm_gem_object_lookup(file, handle);
-	if (!obj)
-		return -ENOENT;
-
-	if (!obj->filp) {
-		ret = -EINVAL;
-		goto unref;
-	}
-
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto unref;
-
-	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
-	drm_gem_object_put_unlocked(obj);
-
-	return ret;
-}
-
 static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
 {
 	struct drm_gem_object *gem_obj = &vkms_obj->gem;
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 7041007..4188176 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
 		return NULL;
 
 	crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
-	if (WARN_ON(!crc_data))
-		DRM_INFO("Couldn't allocate crc_data");
+	if (!crc_data) {
+		DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+		kfree(vkms_state);
+		return NULL;
+	}
 
 	vkms_state->crc_data = crc_data;
 
@@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
 	struct drm_gem_object *gem_obj;
-	struct vkms_gem_object *vkms_obj;
 	int ret;
 
 	if (!state->fb)
 		return 0;
 
 	gem_obj = drm_gem_fb_get_obj(state->fb, 0);
-	vkms_obj = drm_gem_to_vkms_gem(gem_obj);
 	ret = vkms_gem_vmap(gem_obj);
 	if (ret)
 		DRM_ERROR("vmap failed: %d\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d7a2dfb..f05a29f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,7 +667,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
-	mutex_init(&dev_priv->requested_layout_mutex);
 	mutex_init(&dev_priv->global_kms_state_mutex);
 	ttm_lock_init(&dev_priv->reservation_sem);
 	spin_lock_init(&dev_priv->resource_lock);
@@ -803,11 +802,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
-	ret = vmw_ttm_global_init(dev_priv);
-	if (unlikely(ret != 0))
-		goto out_err0;
-
-
 	vmw_master_init(&dev_priv->fbdev_master);
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -818,7 +812,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
 		DRM_ERROR("Failed mapping MMIO.\n");
-		goto out_err3;
+		goto out_err0;
 	}
 
 	/* Need mmio memory to check for fifo pitchlock cap. */
@@ -830,8 +824,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err4;
 	}
 
-	dev_priv->tdev = ttm_object_device_init
-		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+						&vmw_prime_dmabuf_ops);
 
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -872,7 +866,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 VMWGFX_FILE_PAGE_OFFSET,
@@ -994,8 +987,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
 	memunmap(dev_priv->mmio_virt);
-out_err3:
-	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
@@ -1047,7 +1038,6 @@ static void vmw_driver_unload(struct drm_device *dev)
 	memunmap(dev_priv->mmio_virt);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index aca974b..cd607ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -417,8 +417,6 @@ enum {
 
 struct vmw_private {
 	struct ttm_bo_device bdev;
-	struct ttm_bo_global_ref bo_global_ref;
-	struct drm_global_reference mem_global_ref;
 
 	struct vmw_fifo_state fifo;
 
@@ -468,15 +466,6 @@ struct vmw_private {
 	uint32_t num_displays;
 
 	/*
-	 * Currently requested_layout_mutex is used to protect the gui
-	 * positionig state in display unit. With that use case currently this
-	 * mutex is only taken during layout ioctl and atomic check_modeset.
-	 * Other display unit state can be protected with this mutex but that
-	 * needs careful consideration.
-	 */
-	struct mutex requested_layout_mutex;
-
-	/*
 	 * Framebuffer info.
 	 */
 
@@ -486,8 +475,6 @@ struct vmw_private {
 	struct vmw_overlay *overlay_priv;
 	struct drm_property *hotplug_mode_update_property;
 	struct drm_property *implicit_placement_property;
-	unsigned num_implicit;
-	struct vmw_framebuffer *implicit_fb;
 	struct mutex global_kms_state_mutex;
 	spinlock_t cursor_lock;
 	struct drm_atomic_state *suspend_state;
@@ -845,8 +832,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
  * TTM glue - vmwgfx_ttm_glue.c
  */
 
-extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
-extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
@@ -1368,7 +1353,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
 
 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
 {
-	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+	return &ttm_mem_glob;
 }
 
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f872615..301260e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -906,13 +906,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 		container_of(action, struct vmw_event_fence_action, action);
 	struct drm_device *dev = eaction->dev;
 	struct drm_pending_event *event = eaction->event;
-	struct drm_file *file_priv;
-
 
 	if (unlikely(event == NULL))
 		return;
 
-	file_priv = event->file_priv;
 	spin_lock_irq(&dev->event_lock);
 
 	if (likely(eaction->tv_sec != NULL)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index dca04d4..b351fb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
+#include <drm/drm_damage_helper.h>
 
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -456,21 +457,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 		struct drm_crtc *crtc = state->crtc;
 		struct vmw_connector_state *vcs;
 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
 
 		vcs = vmw_connector_state_to_vcs(du->connector.state);
-
-		/* Only one active implicit framebuffer at a time. */
-		mutex_lock(&dev_priv->global_kms_state_mutex);
-		if (vcs->is_implicit && dev_priv->implicit_fb &&
-		    !(dev_priv->num_implicit == 1 && du->active_implicit)
-		    && dev_priv->implicit_fb != vfb) {
-			DRM_ERROR("Multiple implicit framebuffers "
-				  "not supported.\n");
-			ret = -EINVAL;
-		}
-		mutex_unlock(&dev_priv->global_kms_state_mutex);
 	}
 
 
@@ -493,24 +481,24 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 				     struct drm_plane_state *new_state)
 {
 	int ret = 0;
+	struct drm_crtc_state *crtc_state = NULL;
 	struct vmw_surface *surface = NULL;
 	struct drm_framebuffer *fb = new_state->fb;
 
-	struct drm_rect src = drm_plane_state_src(new_state);
-	struct drm_rect dest = drm_plane_state_dest(new_state);
+	if (new_state->crtc)
+		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+							   new_state->crtc);
+
+	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, true);
+	if (ret)
+		return ret;
 
 	/* Turning off */
 	if (!fb)
-		return ret;
-
-	ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
-					    &src, &dest,
-					    DRM_MODE_ROTATE_0,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    true, true, &new_state->visible);
-	if (!ret)
-		return ret;
+		return 0;
 
 	/* A lot of the code assumes this */
 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
@@ -846,58 +834,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 	kfree(vfbs);
 }
 
-static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
-				  struct drm_file *file_priv,
-				  unsigned flags, unsigned color,
-				  struct drm_clip_rect *clips,
-				  unsigned num_clips)
-{
-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-	struct vmw_framebuffer_surface *vfbs =
-		vmw_framebuffer_to_vfbs(framebuffer);
-	struct drm_clip_rect norect;
-	int ret, inc = 1;
-
-	/* Legacy Display Unit does not support 3D */
-	if (dev_priv->active_display_unit == vmw_du_legacy)
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev_priv->dev);
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0)) {
-		drm_modeset_unlock_all(dev_priv->dev);
-		return ret;
-	}
-
-	if (!num_clips) {
-		num_clips = 1;
-		clips = &norect;
-		norect.x1 = norect.y1 = 0;
-		norect.x2 = framebuffer->width;
-		norect.y2 = framebuffer->height;
-	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
-		num_clips /= 2;
-		inc = 2; /* skip source rects */
-	}
-
-	if (dev_priv->active_display_unit == vmw_du_screen_object)
-		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
-						   clips, NULL, NULL, 0, 0,
-						   num_clips, inc, NULL, NULL);
-	else
-		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
-						 clips, NULL, NULL, 0, 0,
-						 num_clips, inc, NULL, NULL);
-
-	vmw_fifo_flush(dev_priv, false);
-	ttm_read_unlock(&dev_priv->reservation_sem);
-
-	drm_modeset_unlock_all(dev_priv->dev);
-
-	return 0;
-}
-
 /**
  * vmw_kms_readback - Perform a readback from the screen system to
  * a buffer-object backed framebuffer.
@@ -941,7 +877,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 
 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.destroy = vmw_framebuffer_surface_destroy,
-	.dirty = vmw_framebuffer_surface_dirty,
+	.dirty = drm_atomic_helper_dirtyfb,
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -1084,16 +1020,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	switch (dev_priv->active_display_unit) {
-	case vmw_du_screen_target:
-		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
-				       clips, NULL, num_clips, increment,
-				       true, true, NULL);
-		break;
-	case vmw_du_screen_object:
-		ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
-					      clips, NULL, num_clips,
-					      increment, true, NULL, NULL);
-		break;
 	case vmw_du_legacy:
 		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
 					      clips, num_clips, increment);
@@ -1112,9 +1038,25 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
 	return ret;
 }
 
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+					struct drm_file *file_priv,
+					unsigned int flags, unsigned int color,
+					struct drm_clip_rect *clips,
+					unsigned int num_clips)
+{
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
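+	/*
+	 * The legacy display unit keeps the manual dirty path; everything
+	 * else goes through the atomic damage helper.
+	 */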
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+						color, clips, num_clips);
+
+	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+					 clips, num_clips);
+}
+
 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
 	.destroy = vmw_framebuffer_bo_destroy,
-	.dirty = vmw_framebuffer_bo_dirty,
+	.dirty = vmw_framebuffer_bo_dirty_ext,
 };
 
 /**
@@ -1565,6 +1507,88 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
 }
 
 /**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update.
+ * Otherwise returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return an
+ * error pointer, in particular -EDEADLK if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	if (crtc_state) {
+		lockdep_assert_held(&crtc->mutex.mutex.base);
+	} else {
+		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+		if (ret != 0 && ret != -EALREADY)
+			return ERR_PTR(ret);
+
+		crtc_state = crtc->state;
+	}
+
+	return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ *   Zero on success,
+ *   -EINVAL on invalid state,
+ *   -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+				  struct drm_atomic_state *state)
+{
+	struct drm_framebuffer *implicit_fb = NULL;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *plane_state;
+
+	drm_for_each_crtc(crtc, dev) {
+		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+		if (!du->is_implicit)
+			continue;
+
+		crtc_state = vmw_crtc_state_and_lock(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state || !crtc_state->enable)
+			continue;
+
+		/*
+		 * Can't move primary planes across crtcs, so this is OK.
+		 * It also means we don't need to take the plane mutex.
+		 */
+		plane_state = du->primary.state;
+		if (plane_state->crtc != crtc)
+			continue;
+
+		if (!implicit_fb)
+			implicit_fb = plane_state->fb;
+		else if (implicit_fb != plane_state->fb)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * vmw_kms_check_topology - Validates topology in drm_atomic_state
  * @dev: DRM device
  * @state: the driver state object
@@ -1575,7 +1599,6 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
 static int vmw_kms_check_topology(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
-	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_rect *rects;
 	struct drm_crtc *crtc;
@@ -1587,19 +1610,31 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 	if (!rects)
 		return -ENOMEM;
 
-	mutex_lock(&dev_priv->requested_layout_mutex);
-
 	drm_for_each_crtc(crtc, dev) {
 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-		struct drm_crtc_state *crtc_state = crtc->state;
+		struct drm_crtc_state *crtc_state;
 
 		i = drm_crtc_index(crtc);
 
-		if (crtc_state && crtc_state->enable) {
+		crtc_state = vmw_crtc_state_and_lock(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			goto clean;
+		}
+
+		if (!crtc_state)
+			continue;
+
+		if (crtc_state->enable) {
 			rects[i].x1 = du->gui_x;
 			rects[i].y1 = du->gui_y;
 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+		} else {
+			rects[i].x1 = 0;
+			rects[i].y1 = 0;
+			rects[i].x2 = 0;
+			rects[i].y2 = 0;
 		}
 	}
 
@@ -1611,14 +1646,6 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		struct drm_connector_state *conn_state;
 		struct vmw_connector_state *vmw_conn_state;
 
-		if (!new_crtc_state->enable) {
-			rects[i].x1 = 0;
-			rects[i].y1 = 0;
-			rects[i].x2 = 0;
-			rects[i].y2 = 0;
-			continue;
-		}
-
 		if (!du->pref_active) {
 			ret = -EINVAL;
 			goto clean;
@@ -1639,18 +1666,12 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
 		vmw_conn_state->gui_x = du->gui_x;
 		vmw_conn_state->gui_y = du->gui_y;
-
-		rects[i].x1 = du->gui_x;
-		rects[i].y1 = du->gui_y;
-		rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
-		rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
 	}
 
 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
 					   rects);
 
 clean:
-	mutex_unlock(&dev_priv->requested_layout_mutex);
 	kfree(rects);
 	return ret;
 }
@@ -1681,6 +1702,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
 	if (ret)
 		return ret;
 
+	ret = vmw_kms_check_implicit(dev, state);
+	if (ret)
+		return ret;
+
 	if (!state->allow_modeset)
 		return ret;
 
@@ -2003,11 +2028,25 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
 	struct vmw_display_unit *du;
 	struct drm_connector *con;
 	struct drm_connector_list_iter conn_iter;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_crtc *crtc;
+	int ret;
 
-	/*
-	 * Currently only gui_x/y is protected with requested_layout_mutex.
-	 */
-	mutex_lock(&dev_priv->requested_layout_mutex);
+	/* Currently gui_x/y is protected with the crtc mutex */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	drm_for_each_crtc(crtc, dev) {
+		ret = drm_modeset_lock(&crtc->mutex, &ctx);
+		if (ret < 0) {
+			if (ret == -EDEADLK) {
+				drm_modeset_backoff(&ctx);
+				goto retry;
+			}
+			goto out_fini;
+		}
+	}
+
 	drm_connector_list_iter_begin(dev, &conn_iter);
 	drm_for_each_connector_iter(con, &conn_iter) {
 		du = vmw_connector_to_du(con);
@@ -2026,9 +2065,7 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
 		}
 	}
 	drm_connector_list_iter_end(&conn_iter);
-	mutex_unlock(&dev_priv->requested_layout_mutex);
 
-	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
 		du = vmw_connector_to_du(con);
 		if (num_rects > du->unit) {
@@ -2048,10 +2085,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
 		}
 		con->status = vmw_du_connector_detect(con, true);
 	}
-	mutex_unlock(&dev->mode_config.mutex);
 
 	drm_sysfs_hotplug_event(dev);
-
+out_fini:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	mutex_unlock(&dev->mode_config.mutex);
+
 	return 0;
 }
 
@@ -2275,84 +2315,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 	return 1;
 }
 
-int vmw_du_connector_set_property(struct drm_connector *connector,
-				  struct drm_property *property,
-				  uint64_t val)
-{
-	struct vmw_display_unit *du = vmw_connector_to_du(connector);
-	struct vmw_private *dev_priv = vmw_priv(connector->dev);
-
-	if (property == dev_priv->implicit_placement_property)
-		du->is_implicit = val;
-
-	return 0;
-}
-
-
-
-/**
- * vmw_du_connector_atomic_set_property - Atomic version of get property
- *
- * @crtc - crtc the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_set_property(struct drm_connector *connector,
-				     struct drm_connector_state *state,
-				     struct drm_property *property,
-				     uint64_t val)
-{
-	struct vmw_private *dev_priv = vmw_priv(connector->dev);
-	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-	struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
-
-	if (property == dev_priv->implicit_placement_property) {
-		vcs->is_implicit = val;
-
-		/*
-		 * We should really be doing a drm_atomic_commit() to
-		 * commit the new state, but since this doesn't cause
-		 * an immedate state change, this is probably ok
-		 */
-		du->is_implicit = vcs->is_implicit;
-	} else {
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
-/**
- * vmw_du_connector_atomic_get_property - Atomic version of get property
- *
- * @connector - connector the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_get_property(struct drm_connector *connector,
-				     const struct drm_connector_state *state,
-				     struct drm_property *property,
-				     uint64_t *val)
-{
-	struct vmw_private *dev_priv = vmw_priv(connector->dev);
-	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-
-	if (property == dev_priv->implicit_placement_property)
-		*val = vcs->is_implicit;
-	else {
-		DRM_ERROR("Invalid Property %s\n", property->name);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 /**
  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
  * @dev: drm device for the ioctl
@@ -2742,143 +2704,25 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
- *
- * @dev_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- */
-void vmw_kms_del_active(struct vmw_private *dev_priv,
-			struct vmw_display_unit *du)
-{
-	mutex_lock(&dev_priv->global_kms_state_mutex);
-	if (du->active_implicit) {
-		if (--(dev_priv->num_implicit) == 0)
-			dev_priv->implicit_fb = NULL;
-		du->active_implicit = false;
-	}
-	mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
- *
- * @vmw_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- * @vfb: The implicit framebuffer
- *
- * Registers a binding to an implicit framebuffer.
- */
-void vmw_kms_add_active(struct vmw_private *dev_priv,
-			struct vmw_display_unit *du,
-			struct vmw_framebuffer *vfb)
-{
-	mutex_lock(&dev_priv->global_kms_state_mutex);
-	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
-
-	if (!du->active_implicit && du->is_implicit) {
-		dev_priv->implicit_fb = vfb;
-		du->active_implicit = true;
-		dev_priv->num_implicit++;
-	}
-	mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc we want to flip.
- *
- * Returns true or false depending whether it's OK to flip this crtc
- * based on the criterion that we must not have more than one implicit
- * frame-buffer at any one time.
- */
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
-			    struct drm_crtc *crtc)
-{
-	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-	bool ret;
-
-	mutex_lock(&dev_priv->global_kms_state_mutex);
-	ret = !du->is_implicit || dev_priv->num_implicit == 1;
-	mutex_unlock(&dev_priv->global_kms_state_mutex);
-
-	return ret;
-}
-
-/**
- * vmw_kms_update_implicit_fb - Update the implicit fb.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc the new implicit frame-buffer is bound to.
- */
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
-				struct drm_crtc *crtc)
-{
-	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-	struct drm_plane *plane = crtc->primary;
-	struct vmw_framebuffer *vfb;
-
-	mutex_lock(&dev_priv->global_kms_state_mutex);
-
-	if (!du->is_implicit)
-		goto out_unlock;
-
-	vfb = vmw_framebuffer_to_vfb(plane->state->fb);
-	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
-		     dev_priv->implicit_fb != vfb);
-
-	dev_priv->implicit_fb = vfb;
-out_unlock:
-	mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
  * property.
  *
  * @dev_priv: Pointer to a device private struct.
- * @immutable: Whether the property is immutable.
  *
  * Sets up the implicit placement property unless it's already set up.
  */
 void
-vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
-					   bool immutable)
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
 {
 	if (dev_priv->implicit_placement_property)
 		return;
 
 	dev_priv->implicit_placement_property =
 		drm_property_create_range(dev_priv->dev,
-					  immutable ?
-					  DRM_MODE_PROP_IMMUTABLE : 0,
+					  DRM_MODE_PROP_IMMUTABLE,
 					  "implicit_placement", 0, 1);
-
 }
 
-
-/**
- * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
- *
- * @set: The configuration to set.
- *
- * The vmwgfx Xorg driver doesn't assign the mode::type member, which
- * when drm_mode_set_crtcinfo is called as part of the configuration setting
- * causes it to return incorrect crtc dimensions causing severe problems in
- * the vmwgfx modesetting. So explicitly clear that member before calling
- * into drm_atomic_helper_set_config.
- */
-int vmw_kms_set_config(struct drm_mode_set *set,
-		       struct drm_modeset_acquire_ctx *ctx)
-{
-	if (set && set->mode)
-		set->mode->type = 0;
-
-	return drm_atomic_helper_set_config(set, ctx);
-}
-
-
 /**
  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
  *
@@ -2935,3 +2779,124 @@ void vmw_kms_lost_device(struct drm_device *dev)
 {
 	drm_atomic_helper_shutdown(dev);
 }
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting callbacks in &vmw_du_update_plane to perform
+ * a plane update on a display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_plane_state *old_state = update->old_state;
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_rect clip;
+	struct drm_rect bb;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+	uint32_t reserved_size = 0;
+	uint32_t submit_size = 0;
+	uint32_t curr_size = 0;
+	uint32_t num_hits = 0;
+	void *cmd_start;
+	char *cmd_next;
+	int ret;
+
+	/*
+	 * Iterate in advance to check whether a plane update is really needed
+	 * and to find the number of clips that actually fall in the plane src,
+	 * for fifo allocation.
+	 */
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip)
+		num_hits++;
+
+	if (num_hits == 0)
+		return 0;
+
+	if (update->vfb->bo) {
+		struct vmw_framebuffer_bo *vfbbo =
+			container_of(update->vfb, typeof(*vfbbo), base);
+
+		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+					    update->cpu_blit);
+	} else {
+		struct vmw_framebuffer_surface *vfbs =
+			container_of(update->vfb, typeof(*vfbs), base);
+
+		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+						  0, NULL, NULL);
+	}
+
+	if (ret)
+		return ret;
+
+	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+	if (ret)
+		goto out_unref;
+
+	reserved_size = update->calc_fifo_size(update, num_hits);
+	cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+	if (!cmd_start) {
+		ret = -ENOMEM;
+		goto out_revert;
+	}
+
+	cmd_next = cmd_start;
+
+	if (update->post_prepare) {
+		curr_size = update->post_prepare(update, cmd_next);
+		cmd_next += curr_size;
+		submit_size += curr_size;
+	}
+
+	if (update->pre_clip) {
+		curr_size = update->pre_clip(update, cmd_next, num_hits);
+		cmd_next += curr_size;
+		submit_size += curr_size;
+	}
+
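+	/* Start with an empty bounding box and grow it over all damage clips. */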
+	bb.x1 = INT_MAX;
+	bb.y1 = INT_MAX;
+	bb.x2 = INT_MIN;
+	bb.y2 = INT_MIN;
+
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		uint32_t fb_x = clip.x1;
+		uint32_t fb_y = clip.y1;
+
+		vmw_du_translate_to_crtc(state, &clip);
+		if (update->clip) {
+			curr_size = update->clip(update, cmd_next, &clip, fb_x,
+						 fb_y);
+			cmd_next += curr_size;
+			submit_size += curr_size;
+		}
+		bb.x1 = min_t(int, bb.x1, clip.x1);
+		bb.y1 = min_t(int, bb.y1, clip.y1);
+		bb.x2 = max_t(int, bb.x2, clip.x2);
+		bb.y2 = max_t(int, bb.y2, clip.y2);
+	}
+
+	curr_size = update->post_clip(update, cmd_next, &bb);
+	submit_size += curr_size;
+
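+	/*
+	 * If the callbacks produced more than was reserved, commit zero bytes
+	 * rather than overflow the reservation.
+	 */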
+	if (reserved_size < submit_size)
+		submit_size = 0;
+
+	vmw_fifo_commit(update->dev_priv, submit_size);
+
+	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+					 update->out_fence, NULL);
+	return ret;
+
+out_revert:
+	vmw_validation_revert(&val_ctx);
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 76ec570..655abbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -33,7 +33,123 @@
 #include <drm/drm_encoder.h>
 #include "vmwgfx_drv.h"
 
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if need cpu blit.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer defines that functionality
+ * through the function callbacks in this structure. In brief it involves
+ * surface/buffer object validation, populating FIFO commands and command
+ * submission to the device.
+ */
+struct vmw_du_update_plane {
+	/**
+	 * @calc_fifo_size: Calculate fifo size.
+	 *
+	 * Determine the fifo size needed for the update commands. The number of
+	 * damage clips on the display unit, @num_hits, is passed so that
+	 * sufficient fifo space can be allocated.
+	 *
+	 * Return: Fifo size needed
+	 */
+	uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+				   uint32_t num_hits);
 
+	/**
+	 * @post_prepare: Populate fifo for resource preparation.
+	 *
+	 * Some surface resources or buffer objects need extra command
+	 * submission, such as updating the GB image for a proxy surface or
+	 * defining a GMRFB for a screen object. That should be done here, as
+	 * this callback is called after FIFO allocation with the address of
+	 * the command buffer.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+	/**
+	 * @pre_clip: Populate fifo before clip.
+	 *
+	 * This is where pre-clip commands, such as surface copy/DMA, should be
+	 * populated.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+			     uint32_t num_hits);
+
+	/**
+	 * @clip: Populate fifo for clip.
+	 *
+	 * This is where to populate clips for surface copy/DMA or blit commands
+	 * if needed. It is called once per damage clip on the display unit,
+	 * which is once in total for a full update. @clip is the damage in
+	 * destination (crtc/DU) coordinates, and @src_x, @src_y is the damage
+	 * clip source in framebuffer coordinates.
+	 *
+	 * This callback is optional.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+			 struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+	/**
+	 * @post_clip: Populate fifo after clip.
+	 *
+	 * This is where to populate display unit update commands or blit
+	 * commands.
+	 *
+	 * Return: Size of commands populated to command buffer.
+	 */
+	uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+				    struct drm_rect *bb);
+
+	struct drm_plane *plane;
+	struct drm_plane_state *old_state;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *du;
+	struct vmw_framebuffer *vfb;
+	struct vmw_fence_obj **out_fence;
+	struct mutex *mutex;
+	bool cpu_blit;
+	bool intr;
+};
+
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+	struct vmw_du_update_plane base;
+	/* This member is to handle special case SOU surface update */
+	void *cmd_start;
+};
+
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+	struct vmw_du_update_plane base;
+	int fb_left, fb_top;
+};
 
 /**
  * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
@@ -191,8 +307,6 @@ struct vmw_plane_state {
 struct vmw_connector_state {
 	struct drm_connector_state base;
 
-	bool is_implicit;
-
 	/**
 	 * @gui_x:
 	 *
@@ -254,7 +368,6 @@ struct vmw_display_unit {
 	int gui_x;
 	int gui_y;
 	bool is_implicit;
-	bool active_implicit;
 	int set_gui_x;
 	int set_gui_y;
 };
@@ -334,17 +447,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 			    struct drm_crtc **p_crtc,
 			    struct drm_display_mode **p_mode);
 void vmw_guess_mode_timing(struct drm_display_mode *mode);
-void vmw_kms_del_active(struct vmw_private *dev_priv,
-			struct vmw_display_unit *du);
-void vmw_kms_add_active(struct vmw_private *dev_priv,
-			struct vmw_display_unit *du,
-			struct vmw_framebuffer *vfb);
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
-			    struct drm_crtc *crtc);
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
-				struct drm_crtc *crtc);
-void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
-						bool immutable);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
 
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
@@ -456,6 +560,20 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 		     bool interruptible,
 		     struct drm_crtc *crtc);
 
-int vmw_kms_set_config(struct drm_mode_set *set,
-		       struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+					    struct drm_rect *r)
+{
+	int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+	int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+	drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
+
 #endif
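
For reference, a minimal sketch of the callback sequence the kernel-doc above
implies vmw_du_helper_plane_update() drives, assuming a single FIFO reservation
of calc_fifo_size() bytes. The fifo_reserve()/fifo_commit() helpers and the
src_x/src_y derivation are hypothetical, not the driver's actual code:

    /*
     * Sketch only: callback order per the kernel-doc above; in the real
     * helper src_x/src_y come from translating the clip to fb coordinates.
     */
    static int sketch_du_plane_update(struct vmw_du_update_plane *update,
                                      struct drm_rect *clips, uint32_t num_hits,
                                      struct drm_rect *bb)
    {
            void *cmd, *pos;
            uint32_t i;

            pos = cmd = fifo_reserve(update->calc_fifo_size(update, num_hits));
            if (!cmd)
                    return -ENOMEM;

            if (update->post_prepare)
                    pos += update->post_prepare(update, pos);

            if (update->pre_clip)
                    pos += update->pre_clip(update, pos, num_hits);

            for (i = 0; i < num_hits; i++)
                    if (update->clip)
                            pos += update->clip(update, pos, &clips[i],
                                                clips[i].x1, clips[i].y1);

            /* @bb: bounding box of all clips, in crtc coordinates */
            pos += update->post_clip(update, pos, bb);

            fifo_commit(pos - cmd);     /* submit only what was populated */
            return 0;
    }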
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 7235781..16be515 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -233,7 +233,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
 	.reset = vmw_du_crtc_reset,
 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
-	.set_config = vmw_kms_set_config,
+	.set_config = drm_atomic_helper_set_config,
 };
 
 
@@ -263,18 +263,14 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.detect = vmw_du_connector_detect,
 	.fill_modes = vmw_du_connector_fill_modes,
-	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_ldu_connector_destroy,
 	.reset = vmw_du_connector_reset,
 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
 	.atomic_destroy_state = vmw_du_connector_destroy_state,
-	.atomic_set_property = vmw_du_connector_atomic_set_property,
-	.atomic_get_property = vmw_du_connector_atomic_get_property,
 };
 
 static const struct
 drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
-	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 /*
@@ -417,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
 
-
 	vmw_du_connector_reset(connector);
 	ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
 				 DRM_MODE_CONNECTOR_VIRTUAL);
@@ -428,8 +423,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
 	connector->status = vmw_du_connector_detect(connector, true);
-	vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
-
 
 	ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
 			       DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -448,7 +441,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 		goto err_free_encoder;
 	}
 
-
 	vmw_du_crtc_reset(crtc);
 	ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
 					&ldu->base.cursor,
@@ -514,7 +506,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 	if (ret != 0)
 		goto err_free;
 
-	vmw_kms_create_implicit_placement_property(dev_priv, true);
+	vmw_kms_create_implicit_placement_property(dev_priv);
 
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
 		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8a029ba..3025bfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
 			struct ttm_validate_buffer val_buf;
 
 			val_buf.bo = bo;
-			val_buf.shared = false;
+			val_buf.num_shared = 0;
 			res->func->unbind(res, false, &val_buf);
 		}
 		res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
-	val_buf->shared = false;
+	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	val_buf.shared = false;
+	val_buf.num_shared = 0;
 	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
 		return 0;
 
 	val_buf.bo = NULL;
-	val_buf.shared = false;
+	val_buf.num_shared = 0;
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 	struct vmw_resource *res, *next;
 	struct ttm_validate_buffer val_buf = {
 		.bo = &vbo->base,
-		.shared = false
+		.num_shared = 0
 	};
 
 	lockdep_assert_held(&vbo->base.resv->lock.base);
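
Context for the .shared to .num_shared conversions above: ttm_validate_buffer
now carries the number of shared dma-fence slots to reserve when the buffer
list is reserved, instead of a bool. A minimal sketch of the new usage, on the
understanding that zero shared slots reproduces the old shared = false,
exclusive-only behaviour:

    /* Sketch: reserve a buffer with no shared-fence slots. */
    struct ttm_validate_buffer val_buf = {
            .bo = &vbo->base,
            .num_shared = 0,        /* was: .shared = false */
    };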
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 53316b1..cd586c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,6 +29,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
 
 
 #define vmw_crtc_to_sou(x) \
@@ -76,6 +77,11 @@ struct vmw_kms_sou_dirty_cmd {
 	SVGA3dCmdBlitSurfaceToScreen body;
 };
 
+struct vmw_kms_sou_define_gmrfb {
+	uint32_t header;
+	SVGAFifoCmdDefineGMRFB body;
+};
+
 /**
  * Display unit using screen objects.
  */
@@ -241,28 +247,20 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		sou->buffer = vps->bo;
 		sou->buffer_size = vps->bo_size;
 
-		if (sou->base.is_implicit) {
-			x = crtc->x;
-			y = crtc->y;
-		} else {
-			conn_state = sou->base.connector.state;
-			vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+		conn_state = sou->base.connector.state;
+		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
 
-			x = vmw_conn_state->gui_x;
-			y = vmw_conn_state->gui_y;
-		}
+		x = vmw_conn_state->gui_x;
+		y = vmw_conn_state->gui_y;
 
 		ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
 		if (ret)
 			DRM_ERROR("Failed to define Screen Object %dx%d\n",
 				  crtc->x, crtc->y);
 
-		vmw_kms_add_active(dev_priv, &sou->base, vfb);
 	} else {
 		sou->buffer = NULL;
 		sou->buffer_size = 0;
-
-		vmw_kms_del_active(dev_priv, &sou->base);
 	}
 }
 
@@ -317,38 +315,14 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 }
 
-static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
-				  struct drm_framebuffer *new_fb,
-				  struct drm_pending_vblank_event *event,
-				  uint32_t flags,
-				  struct drm_modeset_acquire_ctx *ctx)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	int ret;
-
-	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
-		return -EINVAL;
-
-	ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
-	if (ret) {
-		DRM_ERROR("Page flip error %d.\n", ret);
-		return ret;
-	}
-
-	if (vmw_crtc_to_du(crtc)->is_implicit)
-		vmw_kms_update_implicit_fb(dev_priv, crtc);
-
-	return ret;
-}
-
 static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
 	.reset = vmw_du_crtc_reset,
 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
-	.set_config = vmw_kms_set_config,
-	.page_flip = vmw_sou_crtc_page_flip,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
 };
 
 /*
@@ -377,19 +351,15 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.detect = vmw_du_connector_detect,
 	.fill_modes = vmw_du_connector_fill_modes,
-	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_sou_connector_destroy,
 	.reset = vmw_du_connector_reset,
 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
 	.atomic_destroy_state = vmw_du_connector_destroy_state,
-	.atomic_set_property = vmw_du_connector_atomic_set_property,
-	.atomic_get_property = vmw_du_connector_atomic_get_property,
 };
 
 
 static const struct
 drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
-	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 
@@ -499,6 +469,263 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
 }
 
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+				     uint32_t num_hits)
+{
+	return sizeof(struct vmw_kms_sou_define_gmrfb) +
+		sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+					void *cmd)
+{
+	struct vmw_framebuffer_bo *vfbbo =
+		container_of(update->vfb, typeof(*vfbbo), base);
+	struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+	int depth = update->vfb->base.format->depth;
+
+	/* Emulate RGBA support; contrary to svga_reg.h, this is not
+	 * supported by hosts. This is only a problem if we are reading
+	 * this value later and expecting what we uploaded back.
+	 */
+	if (depth == 32)
+		depth = 24;
+
+	gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+	gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+	gmr->body.format.colorDepth = depth;
+	gmr->body.format.reserved = 0;
+	gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+	vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+	return sizeof(*gmr);
+}
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update,
+					 void *cmd, struct drm_rect *clip,
+					 uint32_t fb_x, uint32_t fb_y)
+{
+	struct vmw_kms_sou_bo_blit *blit = cmd;
+
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = update->du->unit;
+	blit->body.srcOrigin.x = fb_x;
+	blit->body.srcOrigin.y = fb_y;
+	blit->body.destRect.left = clip->x1;
+	blit->body.destRect.top = clip->y1;
+	blit->body.destRect.right = clip->x2;
+	blit->body.destRect.bottom = clip->y2;
+
+	return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_bo_post_clip(struct vmw_du_update_plane *update,
+				     void *cmd, struct drm_rect *bb)
+{
+	return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane which is being updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+				   struct drm_plane *plane,
+				   struct drm_plane_state *old_state,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_buffer bo_update;
+
+	memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+	bo_update.base.plane = plane;
+	bo_update.base.old_state = old_state;
+	bo_update.base.dev_priv = dev_priv;
+	bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	bo_update.base.vfb = vfb;
+	bo_update.base.out_fence = out_fence;
+	bo_update.base.mutex = NULL;
+	bo_update.base.cpu_blit = false;
+	bo_update.base.intr = true;
+
+	bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+	bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+	bo_update.base.clip = vmw_sou_bo_populate_clip;
+	bo_update.base.post_clip = vmw_sou_bo_post_clip;
+
+	return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+					  uint32_t num_hits)
+{
+	return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+		num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+					     void *cmd)
+{
+	struct vmw_du_update_plane_surface *srf_update;
+
+	srf_update = container_of(update, typeof(*srf_update), base);
+
+	/*
+	 * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+	 * its bounding box is filled before iterating over all the clips. So
+	 * store the FIFO start address and revisit to fill the details.
+	 */
+	srf_update->cmd_start = cmd;
+
+	return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_kms_sou_dirty_cmd *blit = cmd;
+	struct vmw_framebuffer_surface *vfbs;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+		num_hits;
+
+	blit->body.srcImage.sid = vfbs->surface->res.id;
+	blit->body.destScreenId = update->du->unit;
+
+	/* Update the source and destination bounding box later in post_clip */
+	blit->body.srcRect.left = 0;
+	blit->body.srcRect.top = 0;
+	blit->body.srcRect.right = 0;
+	blit->body.srcRect.bottom = 0;
+
+	blit->body.destRect.left = 0;
+	blit->body.destRect.top = 0;
+	blit->body.destRect.right = 0;
+	blit->body.destRect.bottom = 0;
+
+	return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+					  void *cmd, struct drm_rect *clip,
+					  uint32_t src_x, uint32_t src_y)
+{
+	SVGASignedRect *rect = cmd;
+
+	/*
+	 * Rects are relative to the dest bounding box rect on the screen
+	 * object, so translate them to it later in post_clip.
+	 */
+	rect->left = clip->x1;
+	rect->top = clip->y1;
+	rect->right = clip->x2;
+	rect->bottom = clip->y2;
+
+	return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+					  void *cmd, struct drm_rect *bb)
+{
+	struct vmw_du_update_plane_surface *srf_update;
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_rect src_bb;
+	struct vmw_kms_sou_dirty_cmd *blit;
+	SVGASignedRect *rect;
+	uint32_t num_hits;
+	int translate_src_x;
+	int translate_src_y;
+	int i;
+
+	srf_update = container_of(update, typeof(*srf_update), base);
+
+	blit = srf_update->cmd_start;
+	rect = (SVGASignedRect *)&blit[1];
+
+	num_hits = (blit->header.size - sizeof(blit->body)) /
+		sizeof(SVGASignedRect);
+
+	src_bb = *bb;
+
+	/* To translate bb back to fb src coord */
+	translate_src_x = (state->src_x >> 16) - state->crtc_x;
+	translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+	drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+	blit->body.srcRect.left = src_bb.x1;
+	blit->body.srcRect.top = src_bb.y1;
+	blit->body.srcRect.right = src_bb.x2;
+	blit->body.srcRect.bottom = src_bb.y2;
+
+	blit->body.destRect.left = bb->x1;
+	blit->body.destRect.top = bb->y1;
+	blit->body.destRect.right = bb->x2;
+	blit->body.destRect.bottom = bb->y2;
+
+	/* rects are relative to dest bb rect */
+	for (i = 0; i < num_hits; i++) {
+		rect->left -= bb->x1;
+		rect->top -= bb->y1;
+		rect->right -= bb->x1;
+		rect->bottom -= bb->y1;
+		rect++;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane which is being updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+					struct drm_plane *plane,
+					struct drm_plane_state *old_state,
+					struct vmw_framebuffer *vfb,
+					struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_surface srf_update;
+
+	memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+	srf_update.base.plane = plane;
+	srf_update.base.old_state = old_state;
+	srf_update.base.dev_priv = dev_priv;
+	srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	srf_update.base.vfb = vfb;
+	srf_update.base.out_fence = out_fence;
+	srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+	srf_update.base.cpu_blit = false;
+	srf_update.base.intr = true;
+
+	srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+	srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+	srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+	srf_update.base.clip = vmw_sou_surface_clip_rect;
+	srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+	return vmw_du_helper_plane_update(&srf_update.base);
+}
 
 static void
 vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
@@ -509,47 +736,28 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
 	struct vmw_fence_obj *fence = NULL;
 	int ret;
 
+	/* In case of device error, maintain consistent atomic state */
 	if (crtc && plane->state->fb) {
 		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 		struct vmw_framebuffer *vfb =
 			vmw_framebuffer_to_vfb(plane->state->fb);
-		struct drm_vmw_rect vclips;
-
-		vclips.x = crtc->x;
-		vclips.y = crtc->y;
-		vclips.w = crtc->mode.hdisplay;
-		vclips.h = crtc->mode.vdisplay;
 
 		if (vfb->bo)
-			ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
-						      &vclips, 1, 1, true,
-						      &fence, crtc);
+			ret = vmw_sou_plane_update_bo(dev_priv, plane,
+						      old_state, vfb, &fence);
 		else
-			ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
-							   &vclips, NULL, 0, 0,
-							   1, 1, &fence, crtc);
-
-		/*
-		 * We cannot really fail this function, so if we do, then output
-		 * an error and maintain consistent atomic state.
-		 */
+			ret = vmw_sou_plane_update_surface(dev_priv, plane,
+							   old_state, vfb,
+							   &fence);
 		if (ret != 0)
 			DRM_ERROR("Failed to update screen.\n");
 	} else {
-		/*
-		 * When disabling a plane, CRTC and FB should always be NULL
-		 * together, otherwise it's an error.
-		 * Here primary plane is being disable so should really blank
-		 * the screen object display unit, if not already done.
-		 */
+		/* Do nothing when fb and crtc are NULL (blank crtc) */
 		return;
 	}
 
+	/* On error the vblank event is sent from vmw_du_crtc_atomic_flush */
 	event = crtc->state->event;
-	/*
-	 * In case of failure and other cases, vblank event will be sent in
-	 * vmw_du_crtc_atomic_flush.
-	 */
 	if (event && fence) {
 		struct drm_file *file_priv = event->base.file_priv;
 
@@ -640,7 +848,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	primary = &sou->base.primary;
 	cursor = &sou->base.cursor;
 
-	sou->base.active_implicit = false;
 	sou->base.pref_active = (unit == 0);
 	sou->base.pref_width = dev_priv->initial_width;
 	sou->base.pref_height = dev_priv->initial_height;
@@ -666,6 +873,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	}
 
 	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+	drm_plane_enable_fb_damage_clips(primary);
 
 	/* Initialize cursor plane */
 	vmw_du_plane_reset(cursor);
@@ -693,8 +901,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
 	connector->status = vmw_du_connector_detect(connector, true);
-	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
-
 
 	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
 			       DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -733,12 +939,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 				   dev->mode_config.suggested_x_property, 0);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_y_property, 0);
-	if (dev_priv->implicit_placement_property)
-		drm_object_attach_property
-			(&connector->base,
-			 dev_priv->implicit_placement_property,
-			 sou->base.is_implicit);
-
 	return 0;
 
 err_free_unregister:
@@ -764,15 +964,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 	}
 
 	ret = -ENOMEM;
-	dev_priv->num_implicit = 0;
-	dev_priv->implicit_fb = NULL;
 
 	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_kms_create_implicit_placement_property(dev_priv, false);
-
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 		vmw_sou_init(dev_priv, i);
 
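
Since this file now calls drm_plane_enable_fb_damage_clips(), userspace can
hand the driver per-frame damage through the standard FB_DAMAGE_CLIPS plane
property. A hedged userspace sketch using libdrm; the property-id lookup via
drmModeObjectGetProperties() is elided and passed in as prop_id:

    /* Sketch: attach two damage rectangles to an atomic commit. */
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>
    #include <drm_mode.h>

    static int add_damage(int fd, drmModeAtomicReqPtr req,
                          uint32_t plane_id, uint32_t prop_id)
    {
            struct drm_mode_rect clips[2] = {
                    { .x1 = 0,   .y1 = 0,  .x2 = 64,  .y2 = 64  },
                    { .x1 = 128, .y1 = 96, .x2 = 256, .y2 = 160 },
            };
            uint32_t blob_id;
            int ret;

            ret = drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
            if (ret)
                    return ret;

            /* prop_id is the plane's "FB_DAMAGE_CLIPS" property */
            return drmModeAtomicAddProperty(req, plane_id, prop_id,
                                            blob_id) < 0 ? -EINVAL : 0;
    }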
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index e086565..096c294 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -30,7 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-
+#include <drm/drm_damage_helper.h>
 
 #define vmw_crtc_to_stdu(x) \
 	container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -92,6 +92,10 @@ struct vmw_stdu_surface_copy {
 	SVGA3dCmdSurfaceCopy body;
 };
 
+struct vmw_stdu_update_gb_image {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBImage body;
+};
 
 /**
  * struct vmw_screen_target_display_unit
@@ -396,13 +400,8 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	if (!crtc->state->enable)
 		return;
 
-	if (stdu->base.is_implicit) {
-		x = crtc->x;
-		y = crtc->y;
-	} else {
-		x = vmw_conn_state->gui_x;
-		y = vmw_conn_state->gui_y;
-	}
+	x = vmw_conn_state->gui_x;
+	y = vmw_conn_state->gui_y;
 
 	vmw_svga_enable(dev_priv);
 	ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
@@ -417,27 +416,9 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
 {
 }
 
-
 static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
 					struct drm_crtc_state *old_state)
 {
-	struct drm_plane_state *plane_state = crtc->primary->state;
-	struct vmw_private *dev_priv;
-	struct vmw_screen_target_display_unit *stdu;
-	struct vmw_framebuffer *vfb;
-	struct drm_framebuffer *fb;
-
-
-	stdu     = vmw_crtc_to_stdu(crtc);
-	dev_priv = vmw_priv(crtc->dev);
-	fb       = plane_state->fb;
-
-	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
-
-	if (vfb)
-		vmw_kms_add_active(dev_priv, &stdu->base, vfb);
-	else
-		vmw_kms_del_active(dev_priv, &stdu->base);
 }
 
 static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -472,49 +453,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 /**
- * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
- *
- * @crtc: CRTC to attach FB to
- * @fb: FB to attach
- * @event: Event to be posted. This event should've been alloced
- *         using k[mz]alloc, and should've been completely initialized.
- * @page_flip_flags: Input flags.
- *
- * If the STDU uses the same display and content buffers, i.e. a true flip,
- * this function will replace the existing display buffer with the new content
- * buffer.
- *
- * If the STDU uses different display and content buffers, i.e. a blit, then
- * only the content buffer will be updated.
- *
- * RETURNS:
- * 0 on success, error code on failure
- */
-static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
-				   struct drm_framebuffer *new_fb,
-				   struct drm_pending_vblank_event *event,
-				   uint32_t flags,
-				   struct drm_modeset_acquire_ctx *ctx)
-
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
-	int ret;
-
-	if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
-		return -EINVAL;
-
-	ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
-	if (ret) {
-		DRM_ERROR("Page flip error %d.\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-
-/**
  * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
  *
  * @dirty: The closure structure.
@@ -986,8 +924,8 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
 	.reset = vmw_du_crtc_reset,
 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
-	.set_config = vmw_kms_set_config,
-	.page_flip = vmw_stdu_crtc_page_flip,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
 };
 
 
@@ -1042,19 +980,15 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.detect = vmw_du_connector_detect,
 	.fill_modes = vmw_du_connector_fill_modes,
-	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_stdu_connector_destroy,
 	.reset = vmw_du_connector_reset,
 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
 	.atomic_destroy_state = vmw_du_connector_destroy_state,
-	.atomic_set_property = vmw_du_connector_atomic_set_property,
-	.atomic_get_property = vmw_du_connector_atomic_get_property,
 };
 
 
 static const struct
 drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
-	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 
@@ -1257,11 +1191,402 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 	return ret;
 }
 
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+				      uint32_t num_hits)
+{
+	return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+		sizeof(SVGA3dCmdSurfaceDMASuffix) +
+		sizeof(struct vmw_stdu_update);
+}
 
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+					  uint32_t num_hits)
+{
+	return sizeof(struct vmw_stdu_update_gb_image) +
+		sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	struct vmw_stdu_dma *cmd_dma = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd_dma->header.size = sizeof(cmd_dma->body) +
+		sizeof(struct SVGA3dCopyBox) * num_hits +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+	cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+	cmd_dma->body.host.sid = stdu->display_srf->res.id;
+	cmd_dma->body.host.face = 0;
+	cmd_dma->body.host.mipmap = 0;
+	cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+	return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
+					  void *cmd, struct drm_rect *clip,
+					  uint32_t fb_x, uint32_t fb_y)
+{
+	struct SVGA3dCopyBox *box = cmd;
+
+	box->srcx = fb_x;
+	box->srcy = fb_y;
+	box->srcz = 0;
+	box->x = clip->x1;
+	box->y = clip->y1;
+	box->z = 0;
+	box->w = drm_rect_width(clip);
+	box->h = drm_rect_height(clip);
+	box->d = 1;
+
+	return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
+					    void *cmd, struct drm_rect *bb)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+	vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+				 bb->y1, bb->y2);
+
+	return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
+					 void *cmd, uint32_t num_hits)
+{
+	struct vmw_du_update_plane_buffer *bo_update =
+		container_of(update, typeof(*bo_update), base);
+
+	bo_update->fb_left = INT_MAX;
+	bo_update->fb_top = INT_MAX;
+
+	return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update,
+				     void *cmd, struct drm_rect *clip,
+				     uint32_t fb_x, uint32_t fb_y)
+{
+	struct vmw_du_update_plane_buffer *bo_update =
+		container_of(update, typeof(*bo_update), base);
+
+	bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+	bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+	return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+				struct drm_rect *bb)
+{
+	struct vmw_du_update_plane_buffer *bo_update;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_bo *vfbbo;
+	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+	struct vmw_stdu_update_gb_image *cmd_img = cmd;
+	struct vmw_stdu_update *cmd_update;
+	struct ttm_buffer_object *src_bo, *dst_bo;
+	u32 src_offset, dst_offset;
+	s32 src_pitch, dst_pitch;
+	s32 width, height;
+
+	bo_update = container_of(update, typeof(*bo_update), base);
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+	width = bb->x2 - bb->x1;
+	height = bb->y2 - bb->y1;
+
+	diff.cpp = stdu->cpp;
+
+	dst_bo = &stdu->display_srf->res.backup->base;
+	dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+	dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+	src_bo = &vfbbo->buffer->base;
+	src_pitch = update->vfb->base.pitches[0];
+	src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+		stdu->cpp;
+
+	(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+			       src_offset, src_pitch, width * stdu->cpp, height,
+			       &diff);
+
+	if (drm_rect_visible(&diff.rect)) {
+		SVGA3dBox *box = &cmd_img->body.box;
+
+		cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd_img->header.size = sizeof(cmd_img->body);
+		cmd_img->body.image.sid = stdu->display_srf->res.id;
+		cmd_img->body.image.face = 0;
+		cmd_img->body.image.mipmap = 0;
+
+		box->x = diff.rect.x1;
+		box->y = diff.rect.y1;
+		box->z = 0;
+		box->w = drm_rect_width(&diff.rect);
+		box->h = drm_rect_height(&diff.rect);
+		box->d = 1;
+
+		cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+		vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+					 diff.rect.x1, diff.rect.x2,
+					 diff.rect.y1, diff.rect.y2);
+
+		return sizeof(*cmd_img) + sizeof(*cmd_update);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane which is being updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+				    struct drm_plane *plane,
+				    struct drm_plane_state *old_state,
+				    struct vmw_framebuffer *vfb,
+				    struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane_buffer bo_update;
+
+	memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+	bo_update.base.plane = plane;
+	bo_update.base.old_state = old_state;
+	bo_update.base.dev_priv = dev_priv;
+	bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+	bo_update.base.vfb = vfb;
+	bo_update.base.out_fence = out_fence;
+	bo_update.base.mutex = NULL;
+	bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+	bo_update.base.intr = false;
+
+	/*
+	 * VMs without 3D support don't have the surface DMA command, so the
+	 * framebuffer should be moved out of VRAM.
+	 */
+	if (bo_update.base.cpu_blit) {
+		bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+		bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+		bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+		bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+	} else {
+		bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+		bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+		bo_update.base.clip = vmw_stdu_bo_populate_clip;
+		bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+	}
+
+	return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+					uint32_t num_hits)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	uint32_t size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	if (vfbs->is_bo_proxy)
+		size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+	size += sizeof(struct vmw_stdu_update);
+
+	return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+					   uint32_t num_hits)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	uint32_t size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	if (vfbs->is_bo_proxy)
+		size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+	size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+		num_hits + sizeof(struct vmw_stdu_update);
+
+	return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+	struct vmw_framebuffer_surface *vfbs;
+	struct drm_plane_state *state = update->plane->state;
+	struct drm_plane_state *old_state = update->old_state;
+	struct vmw_stdu_update_gb_image *cmd_update = cmd;
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_rect clip;
+	uint32_t copy_size = 0;
+
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	/*
+	 * A proxy surface is a special case where a buffer-object-backed fb is
+	 * wrapped in a surface and needs an update GB image command to sync
+	 * with the device.
+	 */
+	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+	drm_atomic_for_each_plane_damage(&iter, &clip) {
+		SVGA3dBox *box = &cmd_update->body.box;
+
+		cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd_update->header.size = sizeof(cmd_update->body);
+		cmd_update->body.image.sid = vfbs->surface->res.id;
+		cmd_update->body.image.face = 0;
+		cmd_update->body.image.mipmap = 0;
+
+		box->x = clip.x1;
+		box->y = clip.y1;
+		box->z = 0;
+		box->w = drm_rect_width(&clip);
+		box->h = drm_rect_height(&clip);
+		box->d = 1;
+
+		copy_size += sizeof(*cmd_update);
+		cmd_update++;
+	}
+
+	return copy_size;
+}
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+			       uint32_t num_hits)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_surface *vfbs;
+	struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+	stdu = container_of(update->du, typeof(*stdu), base);
+	vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+	cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+	cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+		num_hits;
+	cmd_copy->body.src.sid = vfbs->surface->res.id;
+	cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+	return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd,
+			       struct drm_rect *clip, uint32_t fb_x,
+			       uint32_t fb_y)
+{
+	struct SVGA3dCopyBox *box = cmd;
+
+	box->srcx = fb_x;
+	box->srcy = fb_y;
+	box->srcz = 0;
+	box->x = clip->x1;
+	box->y = clip->y1;
+	box->z = 0;
+	box->w = drm_rect_width(clip);
+	box->h = drm_rect_height(clip);
+	box->d = 1;
+
+	return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd,
+				 struct drm_rect *bb)
+{
+	vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+				 bb->y2);
+
+	return sizeof(struct vmw_stdu_update);
+}
+
+/**
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Plane which is being updated
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ *             The returned fence pointer may be NULL in which case the device
+ *             has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+					 struct drm_plane *plane,
+					 struct drm_plane_state *old_state,
+					 struct vmw_framebuffer *vfb,
+					 struct vmw_fence_obj **out_fence)
+{
+	struct vmw_du_update_plane srf_update;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer_surface *vfbs;
+
+	stdu = vmw_crtc_to_stdu(plane->state->crtc);
+	vfbs = container_of(vfb, typeof(*vfbs), base);
+
+	memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+	srf_update.plane = plane;
+	srf_update.old_state = old_state;
+	srf_update.dev_priv = dev_priv;
+	srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+	srf_update.vfb = vfb;
+	srf_update.out_fence = out_fence;
+	srf_update.mutex = &dev_priv->cmdbuf_mutex;
+	srf_update.cpu_blit = false;
+	srf_update.intr = true;
+
+	if (vfbs->is_bo_proxy)
+		srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
+	if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+		srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+		srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+		srf_update.clip = vmw_stdu_surface_populate_clip;
+	} else {
+		srf_update.calc_fifo_size =
+			vmw_stdu_surface_fifo_size_same_display;
+	}
+
+	srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+	return vmw_du_helper_plane_update(&srf_update);
+}
 
 /**
  * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
- *
  * @plane: display plane
  * @old_state: Only used to get crtc info
  *
@@ -1278,17 +1603,14 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 	struct drm_crtc *crtc = plane->state->crtc;
 	struct vmw_screen_target_display_unit *stdu;
 	struct drm_pending_vblank_event *event;
+	struct vmw_fence_obj *fence = NULL;
 	struct vmw_private *dev_priv;
 	int ret;
 
-	/*
-	 * We cannot really fail this function, so if we do, then output an
-	 * error and maintain consistent atomic state.
-	 */
+	/* In case of device error, maintain consistent atomic state */
 	if (crtc && plane->state->fb) {
 		struct vmw_framebuffer *vfb =
 			vmw_framebuffer_to_vfb(plane->state->fb);
-		struct drm_vmw_rect vclips;
 		stdu = vmw_crtc_to_stdu(crtc);
 		dev_priv = vmw_priv(crtc->dev);
 
@@ -1296,23 +1618,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 		stdu->content_fb_type = vps->content_fb_type;
 		stdu->cpp = vps->cpp;
 
-		vclips.x = crtc->x;
-		vclips.y = crtc->y;
-		vclips.w = crtc->mode.hdisplay;
-		vclips.h = crtc->mode.vdisplay;
-
 		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
 		if (ret)
 			DRM_ERROR("Failed to bind surface to STDU.\n");
 
 		if (vfb->bo)
-			ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
-					       &vclips, 1, 1, true, false,
-					       crtc);
+			ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+						       old_state, vfb, &fence);
 		else
-			ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
-							 &vclips, NULL, 0, 0,
-							 1, 1, NULL, crtc);
+			ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+							    old_state, vfb,
+							    &fence);
 		if (ret)
 			DRM_ERROR("Failed to update STDU.\n");
 	} else {
@@ -1320,12 +1636,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 		stdu = vmw_crtc_to_stdu(crtc);
 		dev_priv = vmw_priv(crtc->dev);
 
-		/*
-		 * When disabling a plane, CRTC and FB should always be NULL
-		 * together, otherwise it's an error.
-		 * Here primary plane is being disable so blank the screen
-		 * target display unit, if not already done.
-		 */
+		/* Blank STDU when fb and crtc are NULL */
 		if (!stdu->defined)
 			return;
 
@@ -1340,36 +1651,25 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 		return;
 	}
 
+	/* On error the vblank event is sent from vmw_du_crtc_atomic_flush */
 	event = crtc->state->event;
-	/*
-	 * In case of failure and other cases, vblank event will be sent in
-	 * vmw_du_crtc_atomic_flush.
-	 */
-	if (event && (ret == 0)) {
-		struct vmw_fence_obj *fence = NULL;
+	if (event && fence) {
 		struct drm_file *file_priv = event->base.file_priv;
 
-		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-
-		/*
-		 * If fence is NULL, then already sync.
-		 */
-		if (fence) {
-			ret = vmw_event_fence_action_queue(
-				file_priv, fence, &event->base,
-				&event->event.vbl.tv_sec,
-				&event->event.vbl.tv_usec,
-				true);
-			if (ret)
-				DRM_ERROR("Failed to queue event on fence.\n");
-			else
-				crtc->state->event = NULL;
-
-			vmw_fence_obj_unreference(&fence);
-		}
-	} else {
-		(void) vmw_fifo_flush(dev_priv, false);
+		ret = vmw_event_fence_action_queue(file_priv,
+						   fence,
+						   &event->base,
+						   &event->event.vbl.tv_sec,
+						   &event->event.vbl.tv_usec,
+						   true);
+		if (ret)
+			DRM_ERROR("Failed to queue event on fence.\n");
+		else
+			crtc->state->event = NULL;
 	}
+
+	if (fence)
+		vmw_fence_obj_unreference(&fence);
 }
 
 
@@ -1457,11 +1757,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 	stdu->base.pref_active = (unit == 0);
 	stdu->base.pref_width  = dev_priv->initial_width;
 	stdu->base.pref_height = dev_priv->initial_height;
-
-	/*
-	 * Remove this after enabling atomic because property values can
-	 * only exist in a state object
-	 */
 	stdu->base.is_implicit = false;
 
 	/* Initialize primary plane */
@@ -1478,6 +1773,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 	}
 
 	drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+	drm_plane_enable_fb_damage_clips(primary);
 
 	/* Initialize cursor plane */
 	vmw_du_plane_reset(cursor);
@@ -1506,7 +1802,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
 	connector->status = vmw_du_connector_detect(connector, false);
-	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
 
 	ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
 			       DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1544,11 +1839,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 				   dev->mode_config.suggested_x_property, 0);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_y_property, 0);
-	if (dev_priv->implicit_placement_property)
-		drm_object_attach_property
-			(&connector->base,
-			 dev_priv->implicit_placement_property,
-			 stdu->base.is_implicit);
 	return 0;
 
 err_free_unregister:
@@ -1617,8 +1907,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 
 	dev_priv->active_display_unit = vmw_du_screen_target;
 
-	vmw_kms_create_implicit_placement_property(dev_priv, false);
-
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
 		ret = vmw_stdu_init(dev_priv, i);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index f882470..e6d75e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -43,60 +43,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
 }
 
-static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	DRM_INFO("global init.\n");
-	return ttm_mem_global_init(ref->object);
-}
-
-static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-int vmw_ttm_global_init(struct vmw_private *dev_priv)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &dev_priv->mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &vmw_ttm_mem_global_init;
-	global_ref->release = &vmw_ttm_mem_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM memory accounting.\n");
-		return ret;
-	}
-
-	dev_priv->bo_global_ref.mem_glob =
-		dev_priv->mem_global_ref.object;
-	global_ref = &dev_priv->bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-	ret = drm_global_item_ref(global_ref);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed setting up TTM buffer objects.\n");
-		goto out_no_bo;
-	}
-
-	return 0;
-out_no_bo:
-	drm_global_item_unref(&dev_priv->mem_global_ref);
-	return ret;
-}
-
-void vmw_ttm_global_release(struct vmw_private *dev_priv)
-{
-	drm_global_item_unref(&dev_priv->bo_global_ref.ref);
-	drm_global_item_unref(&dev_priv->mem_global_ref);
-}
-
 /* struct vmw_validation_mem callback */
 static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f116f09..b3f547f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -285,7 +285,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
 		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
 		if (!val_buf->bo)
 			return -ESRCH;
-		val_buf->shared = false;
+		val_buf->num_shared = 0;
 		list_add_tail(&val_buf->head, &ctx->bo_list);
 		bo_node->as_mob = as_mob;
 		bo_node->cpu_blit = cpu_blit;
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 11ef17c..f5ea32a 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -114,7 +114,7 @@ static int zx_drm_bind(struct device *dev)
 	component_unbind_all(dev, drm);
 out_unregister:
 	dev_set_drvdata(dev, NULL);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	return ret;
 }
 
@@ -124,10 +124,11 @@ static void zx_drm_unbind(struct device *dev)
 
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
 	component_unbind_all(dev, drm);
 	dev_set_drvdata(dev, NULL);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 }
 
 static const struct component_master_ops zx_drm_master_ops = {
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index ae8c53b..83d236f 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,6 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
 
 static void zx_plane_destroy(struct drm_plane *plane)
 {
-	drm_plane_helper_disable(plane, NULL);
 	drm_plane_cleanup(plane);
 }
 
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index b92016c..096017b 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -13,6 +13,7 @@
 	hw/host1x02.o \
 	hw/host1x04.o \
 	hw/host1x05.o \
-	hw/host1x06.o
+	hw/host1x06.o \
+	hw/host1x07.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index de6bc4e..419d8929 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -44,6 +44,7 @@
 #include "hw/host1x04.h"
 #include "hw/host1x05.h"
 #include "hw/host1x06.h"
+#include "hw/host1x07.h"
 
 void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -130,7 +131,19 @@ static const struct host1x_info host1x06_info = {
 	.has_hypervisor = true,
 };
 
+static const struct host1x_info host1x07_info = {
+	.nb_channels = 63,
+	.nb_pts = 704,
+	.nb_mlocks = 32,
+	.nb_bases = 0,
+	.init = host1x07_init,
+	.sync_offset = 0x0,
+	.dma_mask = DMA_BIT_MASK(40),
+	.has_hypervisor = true,
+};
+
 static const struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
 	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
 	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
 	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d188f90..95ea811 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -26,7 +26,6 @@
 #include "../intr.h"
 #include "../job.h"
 
-#define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
 
 static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
@@ -203,7 +202,11 @@ static void enable_gather_filter(struct host1x *host,
 static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
 			       unsigned int index)
 {
-	ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+#if HOST1X_HW < 6
+	ch->regs = dev->regs + index * 0x4000;
+#else
+	ch->regs = dev->regs + index * 0x100;
+#endif
 	enable_gather_filter(dev, ch);
 	return 0;
 }
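
The stride change above reflects the much smaller per-channel VM register
aperture on host1x06 and later. A quick worked comparison; plain standalone C
with illustrative values only:

    #include <stdio.h>

    int main(void)
    {
            unsigned int index = 2;

            /* pre-host1x06: 16 KiB of registers per channel */
            printf("old: base + 0x%x\n", index * 0x4000);   /* base + 0x8000 */
            /* host1x06 and later: 0x100-byte per-channel aperture */
            printf("new: base + 0x%x\n", index * 0x100);    /* base + 0x200 */
            return 0;
    }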
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
index b503c74..8b74951 100644
--- a/drivers/gpu/host1x/hw/debug_hw_1x06.c
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -62,9 +62,12 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 					   struct host1x_channel *ch,
 					   struct output *o)
 {
-	u32 val, rd_ptr, wr_ptr, start, end;
+#if HOST1X_HW <= 6
+	u32 rd_ptr, wr_ptr, start, end;
 	u32 payload = INVALID_PAYLOAD;
 	unsigned int data_count = 0;
+#endif
+	u32 val;
 
 	host1x_debug_output(o, "%u: fifo:\n", ch->id);
 
@@ -78,6 +81,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 	val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
 	host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
 
+#if HOST1X_HW <= 6
 	/* Peek pointer values are invalid during SLCG, so disable it */
 	host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
 
@@ -127,6 +131,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 
 	host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
 	host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+#endif
 }
 
 static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
diff --git a/drivers/gpu/host1x/hw/host1x07.c b/drivers/gpu/host1x/hw/host1x07.c
new file mode 100644
index 0000000..04b779a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.c
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x07.h"
+#include "host1x07_hardware.h"
+
+/* include code */
+#define HOST1X_HW 7
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x07_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x07.h b/drivers/gpu/host1x/hw/host1x07.h
new file mode 100644
index 0000000..57b19f3
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X07_H
+#define HOST1X_HOST1X07_H
+
+struct host1x;
+
+int host1x07_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x07_hardware.h b/drivers/gpu/host1x/hw/host1x07_hardware.h
new file mode 100644
index 0000000..1353e7a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra194
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X07_HARDWARE_H
+#define __HOST1X_HOST1X07_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x07_uclass.h"
+#include "hw_host1x07_vm.h"
+#include "hw_host1x07_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+		| host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
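
Taken together, the helpers in this header build complete CDMA command words: the opcode sits in bits 31:28 and the uclass payloads defined above fill the low bits. A rough user-space sketch of assembling a two-word stream follows; the class ID, condition, and syncpoint index are assumed example values, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* User-space mirrors of the encoders above, for illustration only. */
static uint32_t opcode_setclass(unsigned int class_id, unsigned int offset,
				unsigned int mask)
{
	return (0u << 28) | (offset << 16) | (class_id << 6) | mask;
}

static uint32_t opcode_imm(unsigned int offset, unsigned int value)
{
	return (4u << 28) | (offset << 16) | value;
}

static uint32_t incr_syncpt_payload(unsigned int cond, unsigned int indx)
{
	return ((cond & 0xff) << 10) | (indx & 0xff);
}

int main(void)
{
	/* Select class 0x1, then increment syncpoint 5 under condition 1. */
	uint32_t stream[2] = {
		opcode_setclass(0x1, 0, 0),
		opcode_imm(0, incr_syncpt_payload(1, 5)),
	};

	printf("0x%08x 0x%08x\n", stream[0], stream[1]);
	/* prints: 0x00000040 0x40000405 */
	return 0;
}
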
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
index 4457486..e599e15 100644
--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -59,7 +59,7 @@ static inline u32 host1x_uclass_incr_syncpt_r(void)
 	host1x_uclass_incr_syncpt_r()
 static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
 {
-	return (v & 0xff) << 8;
+	return (v & 0xff) << 10;
 }
 #define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
 	host1x_uclass_incr_syncpt_cond_f(v)
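
With the shift corrected, host1x_uclass_incr_syncpt_cond_f(1) encodes to 1 << 10 = 0x400 instead of 1 << 8 = 0x100. This matches the new host1x07 header below, which also places the condition at bit 10; that the syncpoint index field is 10 bits wide on these host1x generations is inferred from the fix, not stated in the patch.
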
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
new file mode 100644
index 0000000..2b99d68
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN			0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN			BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x)		(0x2020 + (x * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL			0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x)		(x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x)		((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE		BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ			0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS			0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x)		(((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x)		((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x)			(0x2588 + (x * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x)		(((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x)		((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE			0x2aa8
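
As a quick check of the CMDFIFO_PEEK_PTRS field extractors just defined, a standalone sketch with an invented readback value:

#include <assert.h>

#define WR_PTR_V(x) (((x) >> 16) & 0xfff)	/* PEEK_PTRS_WR_PTR_V */
#define RD_PTR_V(x) ((x) & 0xfff)		/* PEEK_PTRS_RD_PTR_V */

int main(void)
{
	unsigned int raw = 0x01230045;	/* hypothetical register value */

	assert(WR_PTR_V(raw) == 0x123);
	assert(RD_PTR_V(raw) == 0x045);
	return 0;
}
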
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
new file mode 100644
index 0000000..7e4e3b3
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X07_UCLASS_H
+#define HOST1X_HW_HOST1X07_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 10;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
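
To make the <x>_r / <x>_<y>_f convention documented at the top of this header concrete, here is a minimal user-space sketch that builds a WAIT_SYNCPT payload with the same shifts and masks; the _v-style decoder is an illustrative addition, not part of the header.

#include <assert.h>
#include <stdint.h>

static uint32_t wait_syncpt_indx_f(uint32_t v)   { return (v & 0xff) << 24; }
static uint32_t wait_syncpt_thresh_f(uint32_t v) { return (v & 0xffffff) << 0; }
/* A _v-style decoder for the index field, added for illustration. */
static uint32_t wait_syncpt_indx_v(uint32_t r)   { return (r >> 24) & 0xff; }

int main(void)
{
	/* "wait until syncpoint 7 reaches threshold 0x1234" as one word */
	uint32_t word = wait_syncpt_indx_f(7) | wait_syncpt_thresh_f(0x1234);

	assert(word == 0x07001234);
	assert(wait_syncpt_indx_v(word) == 7);
	return 0;
}
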
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
new file mode 100644
index 0000000..7e4629e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART				0x0000
+#define HOST1X_CHANNEL_DMASTART_HI			0x0004
+#define HOST1X_CHANNEL_DMAPUT				0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI			0x000c
+#define HOST1X_CHANNEL_DMAGET				0x0010
+#define HOST1X_CHANNEL_DMAGET_HI			0x0014
+#define HOST1X_CHANNEL_DMAEND				0x0018
+#define HOST1X_CHANNEL_DMAEND_HI			0x001c
+#define HOST1X_CHANNEL_DMACTRL				0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP			BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST		BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET		BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT			0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY		BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA			0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET			0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS			0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT			0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP			0x0048
+#define HOST1X_CHANNEL_TEARDOWN				0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x)			(0x6400 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x)	(0x6464 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x)	(0x652c + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x)	(0x6590 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT(x)				(0x8080 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)		(0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x)			(0xa604 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v)			(((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index a23bb33..d946660 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -37,10 +37,12 @@ static void syncpt_restore(struct host1x_syncpt *sp)
  */
 static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 {
+#if HOST1X_HW < 7
 	struct host1x *host = sp->host;
 
 	host1x_sync_writel(host, sp->base_val,
 			   HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
 }
 
 /*
@@ -48,10 +50,12 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
  */
 static void syncpt_read_wait_base(struct host1x_syncpt *sp)
 {
+#if HOST1X_HW < 7
 	struct host1x *host = sp->host;
 
 	sp->base_val =
 		host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
 }
 
 /*
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index a9d2501..163fadb 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -259,6 +259,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
 
 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
 {
+	WARN_ON_ONCE(buf & 0x7);
+
 	if (bufnum)
 		ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
 	else
@@ -268,6 +270,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
 
 void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
 {
+	WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
+
 	ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
 	ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
 }
@@ -435,6 +439,8 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
 				   unsigned int uv_stride,
 				   unsigned int u_offset, unsigned int v_offset)
 {
+	WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
+
 	ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
 	ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
 	ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
@@ -739,48 +745,56 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
 	switch (pix->pixelformat) {
 	case V4L2_PIX_FMT_YUV420:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
-		u_offset = U_OFFSET(pix, image->rect.left,
-				    image->rect.top) - offset;
-		v_offset = V_OFFSET(pix, image->rect.left,
-				    image->rect.top) - offset;
+		u_offset = image->u_offset ?
+			image->u_offset : U_OFFSET(pix, image->rect.left,
+						   image->rect.top) - offset;
+		v_offset = image->v_offset ?
+			image->v_offset : V_OFFSET(pix, image->rect.left,
+						   image->rect.top) - offset;
 
 		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_YVU420:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
-		u_offset = U_OFFSET(pix, image->rect.left,
-				    image->rect.top) - offset;
-		v_offset = V_OFFSET(pix, image->rect.left,
-				    image->rect.top) - offset;
+		u_offset = image->u_offset ?
+			image->u_offset : V_OFFSET(pix, image->rect.left,
+						   image->rect.top) - offset;
+		v_offset = image->v_offset ?
+			image->v_offset : U_OFFSET(pix, image->rect.left,
+						   image->rect.top) - offset;
 
 		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
-					      v_offset, u_offset);
+					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_YUV422P:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
-		u_offset = U2_OFFSET(pix, image->rect.left,
-				     image->rect.top) - offset;
-		v_offset = V2_OFFSET(pix, image->rect.left,
-				     image->rect.top) - offset;
+		u_offset = image->u_offset ?
+			image->u_offset : U2_OFFSET(pix, image->rect.left,
+						    image->rect.top) - offset;
+		v_offset = image->v_offset ?
+			image->v_offset : V2_OFFSET(pix, image->rect.left,
+						    image->rect.top) - offset;
 
 		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_NV12:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
-		u_offset = UV_OFFSET(pix, image->rect.left,
-				     image->rect.top) - offset;
-		v_offset = 0;
+		u_offset = image->u_offset ?
+			image->u_offset : UV_OFFSET(pix, image->rect.left,
+						    image->rect.top) - offset;
+		v_offset = image->v_offset ? image->v_offset : 0;
 
 		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_NV16:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
-		u_offset = UV2_OFFSET(pix, image->rect.left,
-				      image->rect.top) - offset;
-		v_offset = 0;
+		u_offset = image->u_offset ?
+			image->u_offset : UV2_OFFSET(pix, image->rect.left,
+						     image->rect.top) - offset;
+		v_offset = image->v_offset ? image->v_offset : 0;
 
 		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
 					      u_offset, v_offset);
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 67cc820..594c3cb 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -442,36 +442,40 @@ int ipu_ic_task_graphics_init(struct ipu_ic *ic,
 }
 EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
 
-int ipu_ic_task_init(struct ipu_ic *ic,
-		     int in_width, int in_height,
-		     int out_width, int out_height,
-		     enum ipu_color_space in_cs,
-		     enum ipu_color_space out_cs)
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+			 int in_width, int in_height,
+			 int out_width, int out_height,
+			 enum ipu_color_space in_cs,
+			 enum ipu_color_space out_cs,
+			 u32 rsc)
 {
 	struct ipu_ic_priv *priv = ic->priv;
-	u32 reg, downsize_coeff, resize_coeff;
+	u32 downsize_coeff, resize_coeff;
 	unsigned long flags;
 	int ret = 0;
 
-	/* Setup vertical resizing */
-	ret = calc_resize_coeffs(ic, in_height, out_height,
-				 &resize_coeff, &downsize_coeff);
-	if (ret)
-		return ret;
+	if (!rsc) {
+		/* Setup vertical resizing */
 
-	reg = (downsize_coeff << 30) | (resize_coeff << 16);
+		ret = calc_resize_coeffs(ic, in_height, out_height,
+					 &resize_coeff, &downsize_coeff);
+		if (ret)
+			return ret;
 
-	/* Setup horizontal resizing */
-	ret = calc_resize_coeffs(ic, in_width, out_width,
-				 &resize_coeff, &downsize_coeff);
-	if (ret)
-		return ret;
+		rsc = (downsize_coeff << 30) | (resize_coeff << 16);
 
-	reg |= (downsize_coeff << 14) | resize_coeff;
+		/* Setup horizontal resizing */
+		ret = calc_resize_coeffs(ic, in_width, out_width,
+					 &resize_coeff, &downsize_coeff);
+		if (ret)
+			return ret;
+
+		rsc |= (downsize_coeff << 14) | resize_coeff;
+	}
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	ipu_ic_write(ic, reg, ic->reg->rsc);
+	ipu_ic_write(ic, rsc, ic->reg->rsc);
 
 	/* Setup color space conversion */
 	ic->in_cs = in_cs;
@@ -487,6 +491,16 @@ int ipu_ic_task_init(struct ipu_ic *ic,
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return ret;
 }
+
+int ipu_ic_task_init(struct ipu_ic *ic,
+		     int in_width, int in_height,
+		     int out_width, int out_height,
+		     enum ipu_color_space in_cs,
+		     enum ipu_color_space out_cs)
+{
+	return ipu_ic_task_init_rsc(ic, in_width, in_height, out_width,
+				    out_height, in_cs, out_cs, 0);
+}
 EXPORT_SYMBOL_GPL(ipu_ic_task_init);
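
The new ipu_ic_task_init_rsc() lets a caller (the tiled image converter below) supply a precomputed resize word instead of deriving one from whole-frame sizes. A minimal sketch of the packing, with the field layout read off the code above and the example values invented:

#include <assert.h>
#include <stdint.h>

/* Vertical coefficients go in the high half, horizontal in the low:
 * down_v in bits 31:30, resize_v in 29:16, down_h in 15:14,
 * resize_h in 13:0.
 */
static uint32_t ic_pack_rsc(uint32_t down_v, uint32_t resize_v,
			    uint32_t down_h, uint32_t resize_h)
{
	return (down_v << 30) | (resize_v << 16) | (down_h << 14) | resize_h;
}

int main(void)
{
	assert(ic_pack_rsc(1, 9832, 0, 8192) == 0x66682000);
	return 0;
}
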
 
 int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index f408196..13103ab 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -37,17 +37,36 @@
  * when double_buffering boolean is set).
  *
  * Note that the input frame must be split up into the same number
- * of tiles as the output frame.
+ * of tiles as the output frame:
  *
- * FIXME: at this point there is no attempt to deal with visible seams
- * at the tile boundaries when upscaling. The seams are caused by a reset
- * of the bilinear upscale interpolation when starting a new tile. The
- * seams are barely visible for small upscale factors, but become
- * increasingly visible as the upscale factor gets larger, since more
- * interpolated pixels get thrown out at the tile boundaries. A possilble
- * fix might be to overlap tiles of different sizes, but this must be done
- * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
- * alignment restrictions of each tile.
+ *                       +---------+-----+
+ *   +-----+---+         |  A      | B   |
+ *   | A   | B |         |         |     |
+ *   +-----+---+   -->   +---------+-----+
+ *   | C   | D |         |  C      | D   |
+ *   +-----+---+         |         |     |
+ *                       +---------+-----+
+ *
+ * Clockwise 90° rotations are handled by first rescaling into a
+ * reusable temporary tile buffer and then rotating with the 8x8
+ * block rotator, writing to the correct destination:
+ *
+ *                                         +-----+-----+
+ *                                         |     |     |
+ *   +-----+---+         +---------+       | C   | A   |
+ *   | A   | B |         | A,B, |  |       |     |     |
+ *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
+ *   | C   | D |         +---------+       +-----+-----+
+ *   +-----+---+                           | D   | B   |
+ *                                         |     |     |
+ *                                         +-----+-----+
+ *
+ * If the 8x8 block rotator is used, horizontal or vertical flipping
+ * is done during the rotation step, otherwise flipping is done
+ * during the scaling step.
+ * With rotation or flipping, tile order changes between input and
+ * output image. Tiles are numbered row major from top left to bottom
+ * right for both input and output image.
  */
 
 #define MAX_STRIPES_W    4
@@ -84,6 +103,8 @@ struct ipu_image_convert_dma_chan {
 struct ipu_image_tile {
 	u32 width;
 	u32 height;
+	u32 left;
+	u32 top;
 	/* size and strides are in bytes */
 	u32 size;
 	u32 stride;
@@ -135,6 +156,12 @@ struct ipu_image_convert_ctx {
 	struct ipu_image_convert_image in;
 	struct ipu_image_convert_image out;
 	enum ipu_rotate_mode rot_mode;
+	u32 downsize_coeff_h;
+	u32 downsize_coeff_v;
+	u32 image_resize_coeff_h;
+	u32 image_resize_coeff_v;
+	u32 resize_coeffs_h[MAX_STRIPES_W];
+	u32 resize_coeffs_v[MAX_STRIPES_H];
 
 	/* intermediate buffer for rotation */
 	struct ipu_image_convert_dma_buf rot_intermediate[2];
@@ -300,12 +327,11 @@ static void dump_format(struct ipu_image_convert_ctx *ctx,
 	struct ipu_image_convert_priv *priv = chan->priv;
 
 	dev_dbg(priv->ipu->dev,
-		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
 		chan->ic_task, ctx,
 		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
 		ic_image->base.pix.width, ic_image->base.pix.height,
 		ic_image->num_cols, ic_image->num_rows,
-		ic_image->tile[0].width, ic_image->tile[0].height,
 		ic_image->fmt->fourcc & 0xff,
 		(ic_image->fmt->fourcc >> 8) & 0xff,
 		(ic_image->fmt->fourcc >> 16) & 0xff,
@@ -353,24 +379,459 @@ static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
 
 static inline int num_stripes(int dim)
 {
-	if (dim <= 1024)
-		return 1;
-	else if (dim <= 2048)
-		return 2;
+	return (dim - 1) / 1024 + 1;
+}
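
The rewritten helper is a plain ceiling division: 1024 pixels still map to one stripe ((1024 - 1) / 1024 + 1 = 1), 1025 map to two, and 4096 to four, reproducing the old lookup for dimensions up to 2048 while extending smoothly beyond it.
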
+
+/*
+ * Calculate downsizing coefficients, which are the same for all tiles,
+ * and bilinear resizing coefficients, which are used to find the best
+ * seam positions.
+ */
+static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
+					  struct ipu_image *in,
+					  struct ipu_image *out)
+{
+	u32 downsized_width = in->rect.width;
+	u32 downsized_height = in->rect.height;
+	u32 downsize_coeff_v = 0;
+	u32 downsize_coeff_h = 0;
+	u32 resized_width = out->rect.width;
+	u32 resized_height = out->rect.height;
+	u32 resize_coeff_h;
+	u32 resize_coeff_v;
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		resized_width = out->rect.height;
+		resized_height = out->rect.width;
+	}
+
+	/* Do not let invalid input lead to an endless loop below */
+	if (WARN_ON(resized_width == 0 || resized_height == 0))
+		return -EINVAL;
+
+	while (downsized_width >= resized_width * 2) {
+		downsized_width >>= 1;
+		downsize_coeff_h++;
+	}
+
+	while (downsized_height >= resized_height * 2) {
+		downsized_height >>= 1;
+		downsize_coeff_v++;
+	}
+
+	/*
+	 * Calculate the bilinear resizing coefficients that could be used if
+	 * we were converting with a single tile. The bottom right output pixel
+	 * should sample as close as possible to the bottom right input pixel
+	 * out of the decimator, but not overshoot it:
+	 */
+	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
+	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
+
+	dev_dbg(ctx->chan->priv->ipu->dev,
+		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
+		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
+		resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);
+
+	if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
+	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
+		return -EINVAL;
+
+	ctx->downsize_coeff_h = downsize_coeff_h;
+	ctx->downsize_coeff_v = downsize_coeff_v;
+	ctx->image_resize_coeff_h = resize_coeff_h;
+	ctx->image_resize_coeff_v = resize_coeff_v;
+
+	return 0;
+}
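
Worked example with invented numbers: scaling 1920 pixels down to 800, the loop halves 1920 once to 960 (downsize_coeff = 1, since 960 < 2 * 800), and the bilinear section gets resize_coeff = 8192 * (960 - 1) / (800 - 1) = 9832, i.e. each output pixel advances 9832 / 8192 ≈ 1.2 input pixels, matching the remaining 960 / 800 ratio and staying under the 0x3fff limit.
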
+
+#define round_closest(x, y) round_down((x) + (y)/2, (y))
+
+/*
+ * Find the best aligned seam position in the interval [out_start, out_end].
+ * Rotation and image offsets are out of scope.
+ *
+ * @out_start: start of interval, must be within 1024 pixels / lines
+ *             of out_end
+ * @out_end: end of interval, smaller than or equal to out_edge
+ * @in_edge: input right / bottom edge
+ * @out_edge: output right / bottom edge
+ * @in_align: input alignment, either horizontal 8-byte line start address
+ *            alignment, or pixel alignment due to image format
+ * @out_align: output alignment, either horizontal 8-byte line start address
+ *             alignment, or pixel alignment due to image format or rotator
+ *             block size
+ * @in_burst: horizontal input burst size in case of horizontal flip
+ * @out_burst: horizontal output burst size or rotator block size
+ * @downsize_coeff: downsizing section coefficient
+ * @resize_coeff: main processing section resizing coefficient
+ * @_in_seam: aligned input seam position return value
+ * @_out_seam: aligned output seam position return value
+ */
+static void find_best_seam(struct ipu_image_convert_ctx *ctx,
+			   unsigned int out_start,
+			   unsigned int out_end,
+			   unsigned int in_edge,
+			   unsigned int out_edge,
+			   unsigned int in_align,
+			   unsigned int out_align,
+			   unsigned int in_burst,
+			   unsigned int out_burst,
+			   unsigned int downsize_coeff,
+			   unsigned int resize_coeff,
+			   u32 *_in_seam,
+			   u32 *_out_seam)
+{
+	struct device *dev = ctx->chan->priv->ipu->dev;
+	unsigned int out_pos;
+	/* Input / output seam position candidates */
+	unsigned int out_seam = 0;
+	unsigned int in_seam = 0;
+	unsigned int min_diff = UINT_MAX;
+
+	/*
+	 * Output tiles must start at a multiple of 8 bytes horizontally and
+	 * possibly at an even line horizontally depending on the pixel format.
+	 * Only consider output aligned positions for the seam.
+	 */
+	out_start = round_up(out_start, out_align);
+	for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
+		unsigned int in_pos;
+		unsigned int in_pos_aligned;
+		unsigned int abs_diff;
+
+		/*
+		 * Tiles in the right row / bottom column may not be allowed to
+		 * overshoot horizontally / vertically. out_burst may be the
+		 * actual DMA burst size, or the rotator block size.
+		 */
+		if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
+			continue;
+
+		/*
+		 * Input sample position, corresponding to out_pos, 19.13 fixed
+		 * point.
+		 */
+		in_pos = (out_pos * resize_coeff) << downsize_coeff;
+		/*
+		 * The closest input sample position that we could actually
+		 * start the input tile at, 19.13 fixed point.
+		 */
+		in_pos_aligned = round_closest(in_pos, 8192U * in_align);
+
+		if ((in_burst > 1) &&
+		    (in_edge - in_pos_aligned / 8192U) % in_burst)
+			continue;
+
+		if (in_pos < in_pos_aligned)
+			abs_diff = in_pos_aligned - in_pos;
+		else
+			abs_diff = in_pos - in_pos_aligned;
+
+		if (abs_diff < min_diff) {
+			in_seam = in_pos_aligned;
+			out_seam = out_pos;
+			min_diff = abs_diff;
+		}
+	}
+
+	*_out_seam = out_seam;
+	/* Convert 19.13 fixed point to integer seam position */
+	*_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);
+
+	dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
+		__func__, out_seam, out_align, out_start, out_end,
+		*_in_seam, in_align, min_diff / 8192,
+		DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
+}
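
The 19.13 fixed-point bookkeeping above is easy to sanity-check in isolation. A self-contained sketch of the in_pos / in_pos_aligned step, with all inputs invented for the example:

#include <stdio.h>

/* Same rounding as the round_closest() helper above. */
#define ROUND_CLOSEST(x, y) (((x) + (y) / 2) / (y) * (y))

int main(void)
{
	unsigned int resize_coeff = 4096;	/* 2x upscale (8192 / 4096) */
	unsigned int downsize_coeff = 0;	/* no decimation */
	unsigned int in_align = 8;		/* 8-pixel input alignment */
	unsigned int out_pos = 500;		/* candidate output seam */

	/* Input sample position for out_pos, 19.13 fixed point: 250.000 */
	unsigned int in_pos = (out_pos * resize_coeff) << downsize_coeff;
	/* Closest aligned input start: pixel 248 (31 * 8), 2 px away */
	unsigned int in_pos_aligned = ROUND_CLOSEST(in_pos, 8192u * in_align);

	printf("in_pos %.3f -> aligned input pixel %u\n",
	       in_pos / 8192.0, in_pos_aligned / 8192);
	return 0;
}
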
+
+/*
+ * Tile left edges are required to be aligned to multiples of 8 bytes
+ * by the IDMAC.
+ */
+static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
+{
+	if (fmt->planar)
+		return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
 	else
-		return 4;
+		return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
+}
+
+/*
+ * Tile top edge alignment is only limited by chroma subsampling.
+ */
+static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
+{
+	return fmt->uv_height_dec > 1 ? 2 : 1;
+}
+
+static inline u32 tile_width_align(enum ipu_image_convert_type type,
+				   const struct ipu_image_pixfmt *fmt,
+				   enum ipu_rotate_mode rot_mode)
+{
+	if (type == IMAGE_CONVERT_IN) {
+		/*
+		 * The IC burst reads 8 pixels at a time. Reading beyond the
+		 * end of the line is usually acceptable. Those pixels are
+		 * ignored, unless the IC has to write the scaled line in
+		 * reverse.
+		 */
+		return (!ipu_rot_mode_is_irt(rot_mode) &&
+			(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
+	}
+
+	/*
+	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+	 * formats to guarantee 8-byte aligned line start addresses in the
+	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+	 * for all other formats.
+	 */
+	return (ipu_rot_mode_is_irt(rot_mode) &&
+		fmt->planar && !fmt->uv_packed) ?
+		8 * fmt->uv_width_dec : 8;
+}
+
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+				    const struct ipu_image_pixfmt *fmt,
+				    enum ipu_rotate_mode rot_mode)
+{
+	if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
+		return 2;
+
+	/*
+	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+	 * formats to guarantee 8-byte aligned line start addresses in the
+	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+	 * for all other formats.
+	 */
+	return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
+}
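
Plugging concrete formats into the helpers above: planar YUV420 (uv_width_dec = 2, chroma not packed) gets a 16-pixel left alignment and, when the rotator is in use, 16-pixel output width and height alignment, so the half-width chroma lines still start on 8-byte boundaries; packed RGB565 (bpp = 16) only needs a 4-pixel left alignment and the plain 8x8 IRT block size.
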
+
+/*
+ * Fill in left position and width for all tiles in an input column, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a row, and output tile top position and height are set.
+ */
+static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
+			     unsigned int col,
+			     struct ipu_image_convert_image *in,
+			     unsigned int in_left, unsigned int in_width,
+			     struct ipu_image_convert_image *out,
+			     unsigned int out_left, unsigned int out_width)
+{
+	unsigned int row, tile_idx;
+	struct ipu_image_tile *in_tile, *out_tile;
+
+	for (row = 0; row < in->num_rows; row++) {
+		tile_idx = in->num_cols * row + col;
+		in_tile = &in->tile[tile_idx];
+		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+		in_tile->left = in_left;
+		in_tile->width = in_width;
+
+		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+			out_tile->top = out_left;
+			out_tile->height = out_width;
+		} else {
+			out_tile->left = out_left;
+			out_tile->width = out_width;
+		}
+	}
+}
+
+/*
+ * Fill in top position and height for all tiles in an input row, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a column, and output tile left position and width are set.
+ */
+static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
+			  struct ipu_image_convert_image *in,
+			  unsigned int in_top, unsigned int in_height,
+			  struct ipu_image_convert_image *out,
+			  unsigned int out_top, unsigned int out_height)
+{
+	unsigned int col, tile_idx;
+	struct ipu_image_tile *in_tile, *out_tile;
+
+	for (col = 0; col < in->num_cols; col++) {
+		tile_idx = in->num_cols * row + col;
+		in_tile = &in->tile[tile_idx];
+		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+		in_tile->top = in_top;
+		in_tile->height = in_height;
+
+		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+			out_tile->left = out_top;
+			out_tile->width = out_height;
+		} else {
+			out_tile->top = out_top;
+			out_tile->height = out_height;
+		}
+	}
+}
+
+/*
+ * Find the best horizontal and vertical seam positions to split into tiles.
+ * Minimize the fractional part of the input sampling position for the
+ * top / left pixels of each tile.
+ */
+static void find_seams(struct ipu_image_convert_ctx *ctx,
+		       struct ipu_image_convert_image *in,
+		       struct ipu_image_convert_image *out)
+{
+	struct device *dev = ctx->chan->priv->ipu->dev;
+	unsigned int resized_width = out->base.rect.width;
+	unsigned int resized_height = out->base.rect.height;
+	unsigned int col;
+	unsigned int row;
+	unsigned int in_left_align = tile_left_align(in->fmt);
+	unsigned int in_top_align = tile_top_align(in->fmt);
+	unsigned int out_left_align = tile_left_align(out->fmt);
+	unsigned int out_top_align = tile_top_align(out->fmt);
+	unsigned int out_width_align = tile_width_align(out->type, out->fmt,
+							ctx->rot_mode);
+	unsigned int out_height_align = tile_height_align(out->type, out->fmt,
+							  ctx->rot_mode);
+	unsigned int in_right = in->base.rect.width;
+	unsigned int in_bottom = in->base.rect.height;
+	unsigned int out_right = out->base.rect.width;
+	unsigned int out_bottom = out->base.rect.height;
+	unsigned int flipped_out_left;
+	unsigned int flipped_out_top;
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		/* Switch width/height and align top left to IRT block size */
+		resized_width = out->base.rect.height;
+		resized_height = out->base.rect.width;
+		out_left_align = out_height_align;
+		out_top_align = out_width_align;
+		out_width_align = out_left_align;
+		out_height_align = out_top_align;
+		out_right = out->base.rect.height;
+		out_bottom = out->base.rect.width;
+	}
+
+	for (col = in->num_cols - 1; col > 0; col--) {
+		bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
+					  !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+		bool allow_out_overshoot = (col < in->num_cols - 1) &&
+					   !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+		unsigned int out_start;
+		unsigned int out_end;
+		unsigned int in_left;
+		unsigned int out_left;
+
+		/*
+		 * Align input width to burst length if the scaling step flips
+		 * horizontally.
+		 */
+
+		/* Start within 1024 pixels of the right edge */
+		out_start = max_t(int, 0, out_right - 1024);
+		/* End before having to add more columns to the left */
+		out_end = min_t(unsigned int, out_right, col * 1024);
+
+		find_best_seam(ctx, out_start, out_end,
+			       in_right, out_right,
+			       in_left_align, out_left_align,
+			       allow_in_overshoot ? 1 : 8 /* burst length */,
+			       allow_out_overshoot ? 1 : out_width_align,
+			       ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
+			       &in_left, &out_left);
+
+		if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+			flipped_out_left = resized_width - out_right;
+		else
+			flipped_out_left = out_left;
+
+		fill_tile_column(ctx, col, in, in_left, in_right - in_left,
+				 out, flipped_out_left, out_right - out_left);
+
+		dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
+			in_left, in_right - in_left,
+			flipped_out_left, out_right - out_left);
+
+		in_right = in_left;
+		out_right = out_left;
+	}
+
+	flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
+			   resized_width - out_right : 0;
+
+	fill_tile_column(ctx, 0, in, 0, in_right,
+			 out, flipped_out_left, out_right);
+
+	dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
+		in_right, flipped_out_left, out_right);
+
+	for (row = in->num_rows - 1; row > 0; row--) {
+		bool allow_overshoot = row < in->num_rows - 1;
+		unsigned int out_start;
+		unsigned int out_end;
+		unsigned int in_top;
+		unsigned int out_top;
+
+		/* Start within 1024 lines of the bottom edge */
+		out_start = max_t(int, 0, out_bottom - 1024);
+		/* End before having to add more rows above */
+		out_end = min_t(unsigned int, out_bottom, row * 1024);
+
+		find_best_seam(ctx, out_start, out_end,
+			       in_bottom, out_bottom,
+			       in_top_align, out_top_align,
+			       1, allow_overshoot ? 1 : out_height_align,
+			       ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
+			       &in_top, &out_top);
+
+		if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+		    ipu_rot_mode_is_irt(ctx->rot_mode))
+			flipped_out_top = resized_height - out_bottom;
+		else
+			flipped_out_top = out_top;
+
+		fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
+			      out, flipped_out_top, out_bottom - out_top);
+
+		dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
+			in_top, in_bottom - in_top,
+			flipped_out_top, out_bottom - out_top);
+
+		in_bottom = in_top;
+		out_bottom = out_top;
+	}
+
+	if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+	    ipu_rot_mode_is_irt(ctx->rot_mode))
+		flipped_out_top = resized_height - out_bottom;
+	else
+		flipped_out_top = 0;
+
+	fill_tile_row(ctx, 0, in, 0, in_bottom,
+		      out, flipped_out_top, out_bottom);
+
+	dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
+		in_bottom, flipped_out_top, out_bottom);
 }
 
 static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
 				 struct ipu_image_convert_image *image)
 {
-	int i;
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	unsigned int i;
 
 	for (i = 0; i < ctx->num_tiles; i++) {
-		struct ipu_image_tile *tile = &image->tile[i];
+		struct ipu_image_tile *tile;
+		const unsigned int row = i / image->num_cols;
+		const unsigned int col = i % image->num_cols;
 
-		tile->height = image->base.pix.height / image->num_rows;
-		tile->width = image->base.pix.width / image->num_cols;
+		if (image->type == IMAGE_CONVERT_OUT)
+			tile = &image->tile[ctx->out_tile_map[i]];
+		else
+			tile = &image->tile[i];
+
 		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
 			tile->width;
 
@@ -383,6 +844,13 @@ static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
 			tile->rot_stride =
 				(image->fmt->bpp * tile->height) >> 3;
 		}
+
+		dev_dbg(priv->ipu->dev,
+			"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
+			chan->ic_task, ctx,
+			image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
+			row, col,
+			tile->width, tile->height, tile->left, tile->top);
 	}
 }
 
@@ -459,14 +927,14 @@ static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
 	}
 }
 
-static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
-				     struct ipu_image_convert_image *image)
+static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+				    struct ipu_image_convert_image *image)
 {
 	struct ipu_image_convert_chan *chan = ctx->chan;
 	struct ipu_image_convert_priv *priv = chan->priv;
 	const struct ipu_image_pixfmt *fmt = image->fmt;
 	unsigned int row, col, tile = 0;
-	u32 H, w, h, y_stride, uv_stride;
+	u32 H, top, y_stride, uv_stride;
 	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
 	u32 y_row_off, y_col_off, y_off;
 	u32 y_size, uv_size;
@@ -483,13 +951,12 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
 	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
 
 	for (row = 0; row < image->num_rows; row++) {
-		w = image->tile[tile].width;
-		h = image->tile[tile].height;
-		y_row_off = row * h * y_stride;
-		uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+		top = image->tile[tile].top;
+		y_row_off = top * y_stride;
+		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
 
 		for (col = 0; col < image->num_cols; col++) {
-			y_col_off = col * w;
+			y_col_off = image->tile[tile].left;
 			uv_col_off = y_col_off / fmt->uv_width_dec;
 			if (fmt->uv_packed)
 				uv_col_off *= 2;
@@ -509,24 +976,30 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
 			image->tile[tile].u_off = u_off;
 			image->tile[tile++].v_off = v_off;
 
-			dev_dbg(priv->ipu->dev,
-				"task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
-				chan->ic_task, ctx,
-				image->type == IMAGE_CONVERT_IN ?
-				"Input" : "Output", row, col,
-				y_off, u_off, v_off);
+			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
+				dev_err(priv->ipu->dev,
+					"task %u: ctx %p: %s@[%d,%d]: "
+					"y_off %08x, u_off %08x, v_off %08x\n",
+					chan->ic_task, ctx,
+					image->type == IMAGE_CONVERT_IN ?
+					"Input" : "Output", row, col,
+					y_off, u_off, v_off);
+				return -EINVAL;
+			}
 		}
 	}
+
+	return 0;
 }
 
-static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
-				     struct ipu_image_convert_image *image)
+static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+				    struct ipu_image_convert_image *image)
 {
 	struct ipu_image_convert_chan *chan = ctx->chan;
 	struct ipu_image_convert_priv *priv = chan->priv;
 	const struct ipu_image_pixfmt *fmt = image->fmt;
 	unsigned int row, col, tile = 0;
-	u32 w, h, bpp, stride;
+	u32 bpp, stride, offset;
 	u32 row_off, col_off;
 
 	/* setup some convenience vars */
@@ -534,34 +1007,183 @@ static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
 	bpp = fmt->bpp;
 
 	for (row = 0; row < image->num_rows; row++) {
-		w = image->tile[tile].width;
-		h = image->tile[tile].height;
-		row_off = row * h * stride;
+		row_off = image->tile[tile].top * stride;
 
 		for (col = 0; col < image->num_cols; col++) {
-			col_off = (col * w * bpp) >> 3;
+			col_off = (image->tile[tile].left * bpp) >> 3;
 
-			image->tile[tile].offset = row_off + col_off;
+			offset = row_off + col_off;
+
+			image->tile[tile].offset = offset;
 			image->tile[tile].u_off = 0;
 			image->tile[tile++].v_off = 0;
 
-			dev_dbg(priv->ipu->dev,
-				"task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
-				chan->ic_task, ctx,
-				image->type == IMAGE_CONVERT_IN ?
-				"Input" : "Output", row, col,
-				row_off + col_off);
+			if (offset & 0x7) {
+				dev_err(priv->ipu->dev,
+					"task %u: ctx %p: %s@[%d,%d]: "
+					"phys %08x\n",
+					chan->ic_task, ctx,
+					image->type == IMAGE_CONVERT_IN ?
+					"Input" : "Output", row, col,
+					row_off + col_off);
+				return -EINVAL;
+			}
 		}
 	}
+
+	return 0;
 }
 
-static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
 			      struct ipu_image_convert_image *image)
 {
 	if (image->fmt->planar)
-		calc_tile_offsets_planar(ctx, image);
+		return calc_tile_offsets_planar(ctx, image);
+
+	return calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * Calculate the resizing ratio for the IC main processing section given input
+ * size, fixed downsizing coefficient, and output size.
+ * Either round to closest for the next tile's first pixel to minimize seams
+ * and distortion (for all but right column / bottom row), or round down to
+ * avoid sampling beyond the edges of the input image for this tile's last
+ * pixel.
+ * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
+ */
+static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
+			     u32 output_size, bool allow_overshoot)
+{
+	u32 downsized = input_size >> downsize_coeff;
+
+	if (allow_overshoot)
+		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
 	else
-		calc_tile_offsets_packed(ctx, image);
+		return 8192 * (downsized - 1) / (output_size - 1);
+}
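
Worked example with invented numbers: a downsized tile width of 100 mapping to 80 output pixels gives DIV_ROUND_CLOSEST(8192 * 100, 80) = 10240 in the round-to-closest case, which lands output pixel 80 (the next tile's first) exactly on input pixel 100; the round-down case gives 8192 * 99 / 79 = 10265, keeping the last output sample (79 * 10265 / 8192 ≈ 98.99) just inside input pixel 99.
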
+
+/*
+ * Slightly modify resize coefficients per tile to hide the bilinear
+ * interpolator reset at tile borders, shifting the right / bottom edge
+ * by up to a half input pixel. This removes noticeable seams between
+ * tiles at higher upscaling factors.
+ */
+static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_tile *in_tile, *out_tile;
+	unsigned int col, row, tile_idx;
+	unsigned int last_output;
+
+	for (col = 0; col < ctx->in.num_cols; col++) {
+		bool closest = (col < ctx->in.num_cols - 1) &&
+			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+		u32 resized_width;
+		u32 resize_coeff_h;
+
+		tile_idx = col;
+		in_tile = &ctx->in.tile[tile_idx];
+		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+		if (ipu_rot_mode_is_irt(ctx->rot_mode))
+			resized_width = out_tile->height;
+		else
+			resized_width = out_tile->width;
+
+		resize_coeff_h = calc_resize_coeff(in_tile->width,
+						   ctx->downsize_coeff_h,
+						   resized_width, closest);
+
+		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
+			__func__, col, resize_coeff_h);
+
+		for (row = 0; row < ctx->in.num_rows; row++) {
+			tile_idx = row * ctx->in.num_cols + col;
+			in_tile = &ctx->in.tile[tile_idx];
+			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+			/*
+			 * With the horizontal scaling factor known, round up
+			 * resized width (output width or height) to burst size.
+			 */
+			if (ipu_rot_mode_is_irt(ctx->rot_mode))
+				out_tile->height = round_up(resized_width, 8);
+			else
+				out_tile->width = round_up(resized_width, 8);
+
+			/*
+			 * Calculate input width from the last accessed input
+			 * pixel given resized width and scaling coefficients.
+			 * Round up to burst size.
+			 */
+			last_output = round_up(resized_width, 8) - 1;
+			if (closest)
+				last_output++;
+			in_tile->width = round_up(
+				(DIV_ROUND_UP(last_output * resize_coeff_h,
+					      8192) + 1)
+				<< ctx->downsize_coeff_h, 8);
+		}
+
+		ctx->resize_coeffs_h[col] = resize_coeff_h;
+	}
+
+	for (row = 0; row < ctx->in.num_rows; row++) {
+		bool closest = (row < ctx->in.num_rows - 1) &&
+			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
+		u32 resized_height;
+		u32 resize_coeff_v;
+
+		tile_idx = row * ctx->in.num_cols;
+		in_tile = &ctx->in.tile[tile_idx];
+		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+		if (ipu_rot_mode_is_irt(ctx->rot_mode))
+			resized_height = out_tile->width;
+		else
+			resized_height = out_tile->height;
+
+		resize_coeff_v = calc_resize_coeff(in_tile->height,
+						   ctx->downsize_coeff_v,
+						   resized_height, closest);
+
+		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
+			__func__, row, resize_coeff_v);
+
+		for (col = 0; col < ctx->in.num_cols; col++) {
+			tile_idx = row * ctx->in.num_cols + col;
+			in_tile = &ctx->in.tile[tile_idx];
+			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+			/*
+			 * With the vertical scaling factor known, round up
+			 * resized height (output width or height) to IDMAC
+			 * limitations.
+			 */
+			if (ipu_rot_mode_is_irt(ctx->rot_mode))
+				out_tile->width = round_up(resized_height, 2);
+			else
+				out_tile->height = round_up(resized_height, 2);
+
+			/*
+			 * Calculate input height from the last accessed input
+			 * pixel given resized height and scaling coefficients.
+			 * Align to IDMAC restrictions.
+			 */
+			last_output = round_up(resized_height, 2) - 1;
+			if (closest)
+				last_output++;
+			in_tile->height = round_up(
+				(DIV_ROUND_UP(last_output * resize_coeff_v,
+					      8192) + 1)
+				<< ctx->downsize_coeff_v, 2);
+		}
+
+		ctx->resize_coeffs_v[row] = resize_coeff_v;
+	}
 }
 
 /*
@@ -611,7 +1233,8 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
 			       struct ipuv3_channel *channel,
 			       struct ipu_image_convert_image *image,
 			       enum ipu_rotate_mode rot_mode,
-			       bool rot_swap_width_height)
+			       bool rot_swap_width_height,
+			       unsigned int tile)
 {
 	struct ipu_image_convert_chan *chan = ctx->chan;
 	unsigned int burst_size;
@@ -621,23 +1244,23 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
 	unsigned int tile_idx[2];
 
 	if (image->type == IMAGE_CONVERT_OUT) {
-		tile_idx[0] = ctx->out_tile_map[0];
+		tile_idx[0] = ctx->out_tile_map[tile];
 		tile_idx[1] = ctx->out_tile_map[1];
 	} else {
-		tile_idx[0] = 0;
+		tile_idx[0] = tile;
 		tile_idx[1] = 1;
 	}
 
 	if (rot_swap_width_height) {
-		width = image->tile[0].height;
-		height = image->tile[0].width;
-		stride = image->tile[0].rot_stride;
+		width = image->tile[tile_idx[0]].height;
+		height = image->tile[tile_idx[0]].width;
+		stride = image->tile[tile_idx[0]].rot_stride;
 		addr0 = ctx->rot_intermediate[0].phys;
 		if (ctx->double_buffering)
 			addr1 = ctx->rot_intermediate[1].phys;
 	} else {
-		width = image->tile[0].width;
-		height = image->tile[0].height;
+		width = image->tile[tile_idx[0]].width;
+		height = image->tile[tile_idx[0]].height;
 		stride = image->stride;
 		addr0 = image->base.phys0 +
 			image->tile[tile_idx[0]].offset;
@@ -655,12 +1278,12 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
 	tile_image.pix.pixelformat =  image->fmt->fourcc;
 	tile_image.phys0 = addr0;
 	tile_image.phys1 = addr1;
-	ipu_cpmem_set_image(channel, &tile_image);
+	if (image->fmt->planar && !rot_swap_width_height) {
+		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
+		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
+	}
 
-	if (image->fmt->planar && !rot_swap_width_height)
-		ipu_cpmem_set_uv_offset(channel,
-					image->tile[tile_idx[0]].u_off,
-					image->tile[tile_idx[0]].v_off);
+	ipu_cpmem_set_image(channel, &tile_image);
 
 	if (rot_mode)
 		ipu_cpmem_set_rotation(channel, rot_mode);
@@ -687,7 +1310,7 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
 	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
 }
 
-static int convert_start(struct ipu_image_convert_run *run)
+static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
 {
 	struct ipu_image_convert_ctx *ctx = run->ctx;
 	struct ipu_image_convert_chan *chan = ctx->chan;
@@ -695,31 +1318,47 @@ static int convert_start(struct ipu_image_convert_run *run)
 	struct ipu_image_convert_image *s_image = &ctx->in;
 	struct ipu_image_convert_image *d_image = &ctx->out;
 	enum ipu_color_space src_cs, dest_cs;
+	unsigned int dst_tile = ctx->out_tile_map[tile];
 	unsigned int dest_width, dest_height;
+	unsigned int col, row;
+	u32 rsc;
 	int ret;
 
-	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
-		__func__, chan->ic_task, ctx, run);
+	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
+		__func__, chan->ic_task, ctx, run, tile, dst_tile);
 
 	src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
 	dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
 
 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
 		/* swap width/height for resizer */
-		dest_width = d_image->tile[0].height;
-		dest_height = d_image->tile[0].width;
+		dest_width = d_image->tile[dst_tile].height;
+		dest_height = d_image->tile[dst_tile].width;
 	} else {
-		dest_width = d_image->tile[0].width;
-		dest_height = d_image->tile[0].height;
+		dest_width = d_image->tile[dst_tile].width;
+		dest_height = d_image->tile[dst_tile].height;
 	}
 
+	row = tile / s_image->num_cols;
+	col = tile % s_image->num_cols;
+
+	rsc =  (ctx->downsize_coeff_v << 30) |
+	       (ctx->resize_coeffs_v[row] << 16) |
+	       (ctx->downsize_coeff_h << 14) |
+	       (ctx->resize_coeffs_h[col]);
+
+	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
+		__func__, s_image->tile[tile].width,
+		s_image->tile[tile].height, dest_width, dest_height, rsc);
+
 	/* setup the IC resizer and CSC */
-	ret = ipu_ic_task_init(chan->ic,
-			       s_image->tile[0].width,
-			       s_image->tile[0].height,
+	ret = ipu_ic_task_init_rsc(chan->ic,
+			       s_image->tile[tile].width,
+			       s_image->tile[tile].height,
 			       dest_width,
 			       dest_height,
-			       src_cs, dest_cs);
+			       src_cs, dest_cs,
+			       rsc);
 	if (ret) {
 		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
 		return ret;
@@ -727,27 +1366,27 @@ static int convert_start(struct ipu_image_convert_run *run)
 
 	/* init the source MEM-->IC PP IDMAC channel */
 	init_idmac_channel(ctx, chan->in_chan, s_image,
-			   IPU_ROTATE_NONE, false);
+			   IPU_ROTATE_NONE, false, tile);
 
 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
 		/* init the IC PP-->MEM IDMAC channel */
 		init_idmac_channel(ctx, chan->out_chan, d_image,
-				   IPU_ROTATE_NONE, true);
+				   IPU_ROTATE_NONE, true, tile);
 
 		/* init the MEM-->IC PP ROT IDMAC channel */
 		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
-				   ctx->rot_mode, true);
+				   ctx->rot_mode, true, tile);
 
 		/* init the destination IC PP ROT-->MEM IDMAC channel */
 		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
-				   IPU_ROTATE_NONE, false);
+				   IPU_ROTATE_NONE, false, tile);
 
 		/* now link IC PP-->MEM to MEM-->IC PP ROT */
 		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
 	} else {
 		/* init the destination IC PP-->MEM IDMAC channel */
 		init_idmac_channel(ctx, chan->out_chan, d_image,
-				   ctx->rot_mode, false);
+				   ctx->rot_mode, false, tile);
 	}
 
 	/* enable the IC */
@@ -805,7 +1444,7 @@ static int do_run(struct ipu_image_convert_run *run)
 	list_del(&run->list);
 	chan->current_run = run;
 
-	return convert_start(run);
+	return convert_start(run, 0);
 }
 
 /* hold irqlock when calling */
@@ -896,7 +1535,7 @@ static irqreturn_t do_bh(int irq, void *dev_id)
 			dev_dbg(priv->ipu->dev,
 				"%s: task %u: signaling abort for ctx %p\n",
 				__func__, chan->ic_task, ctx);
-			complete(&ctx->aborted);
+			complete_all(&ctx->aborted);
 		}
 	}
 
@@ -908,6 +1547,24 @@ static irqreturn_t do_bh(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
+{
+	unsigned int cur_tile = ctx->next_tile - 1;
+	unsigned int next_tile = ctx->next_tile;
+
+	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
+	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
+	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
+	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
+	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
+	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
+	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
+	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
+		return true;
+
+	return false;
+}
+
 /* hold irqlock when calling */
 static irqreturn_t do_irq(struct ipu_image_convert_run *run)
 {
@@ -951,27 +1608,32 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
 	 * not done, place the next tile buffers.
 	 */
 	if (!ctx->double_buffering) {
+		if (ic_settings_changed(ctx)) {
+			convert_stop(run);
+			convert_start(run, ctx->next_tile);
+		} else {
+			src_tile = &s_image->tile[ctx->next_tile];
+			dst_idx = ctx->out_tile_map[ctx->next_tile];
+			dst_tile = &d_image->tile[dst_idx];
 
-		src_tile = &s_image->tile[ctx->next_tile];
-		dst_idx = ctx->out_tile_map[ctx->next_tile];
-		dst_tile = &d_image->tile[dst_idx];
+			ipu_cpmem_set_buffer(chan->in_chan, 0,
+					     s_image->base.phys0 +
+					     src_tile->offset);
+			ipu_cpmem_set_buffer(outch, 0,
+					     d_image->base.phys0 +
+					     dst_tile->offset);
+			if (s_image->fmt->planar)
+				ipu_cpmem_set_uv_offset(chan->in_chan,
+							src_tile->u_off,
+							src_tile->v_off);
+			if (d_image->fmt->planar)
+				ipu_cpmem_set_uv_offset(outch,
+							dst_tile->u_off,
+							dst_tile->v_off);
 
-		ipu_cpmem_set_buffer(chan->in_chan, 0,
-				     s_image->base.phys0 + src_tile->offset);
-		ipu_cpmem_set_buffer(outch, 0,
-				     d_image->base.phys0 + dst_tile->offset);
-		if (s_image->fmt->planar)
-			ipu_cpmem_set_uv_offset(chan->in_chan,
-						src_tile->u_off,
-						src_tile->v_off);
-		if (d_image->fmt->planar)
-			ipu_cpmem_set_uv_offset(outch,
-						dst_tile->u_off,
-						dst_tile->v_off);
-
-		ipu_idmac_select_buffer(chan->in_chan, 0);
-		ipu_idmac_select_buffer(outch, 0);
-
+			ipu_idmac_select_buffer(chan->in_chan, 0);
+			ipu_idmac_select_buffer(outch, 0);
+		}
 	} else if (ctx->next_tile < ctx->num_tiles - 1) {
 
 		src_tile = &s_image->tile[ctx->next_tile + 1];
@@ -1198,9 +1860,6 @@ static int fill_image(struct ipu_image_convert_ctx *ctx,
 	else
 		ic_image->stride  = ic_image->base.pix.bytesperline;
 
-	calc_tile_dimensions(ctx, ic_image);
-	calc_tile_offsets(ctx, ic_image);
-
 	return 0;
 }
 
@@ -1221,40 +1880,11 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
 	return x;
 }
 
-/*
- * We have to adjust the tile width such that the tile physaddrs and
- * U and V plane offsets are multiples of 8 bytes as required by
- * the IPU DMA Controller. For the planar formats, this corresponds
- * to a pixel alignment of 16 (but use a more formal equation since
- * the variables are available). For all the packed formats, 8 is
- * good enough.
- */
-static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
-{
-	return fmt->planar ? 8 * fmt->uv_width_dec : 8;
-}
-
-/*
- * For tile height alignment, we have to ensure that the output tile
- * heights are multiples of 8 lines if the IRT is required by the
- * given rotation mode (the IRT performs rotations on 8x8 blocks
- * at a time). If the IRT is not used, or for input image tiles,
- * 2 lines are good enough.
- */
-static inline u32 tile_height_align(enum ipu_image_convert_type type,
-				    enum ipu_rotate_mode rot_mode)
-{
-	return (type == IMAGE_CONVERT_OUT &&
-		ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
-}
-
 /* Adjusts input/output images to IPU restrictions */
 void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
 			      enum ipu_rotate_mode rot_mode)
 {
 	const struct ipu_image_pixfmt *infmt, *outfmt;
-	unsigned int num_in_rows, num_in_cols;
-	unsigned int num_out_rows, num_out_cols;
 	u32 w_align, h_align;
 
 	infmt = get_format(in->pix.pixelformat);
@@ -1286,36 +1916,31 @@ void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
 					in->pix.height / 4);
 	}
 
-	/* get tiling rows/cols from output format */
-	num_out_rows = num_stripes(out->pix.height);
-	num_out_cols = num_stripes(out->pix.width);
-	if (ipu_rot_mode_is_irt(rot_mode)) {
-		num_in_rows = num_out_cols;
-		num_in_cols = num_out_rows;
-	} else {
-		num_in_rows = num_out_rows;
-		num_in_cols = num_out_cols;
-	}
-
 	/* align input width/height */
-	w_align = ilog2(tile_width_align(infmt) * num_in_cols);
-	h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
-			num_in_rows);
+	w_align = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt, rot_mode));
+	h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt, rot_mode));
 	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
 	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
 
 	/* align output width/height */
-	w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
-	h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
-			num_out_rows);
+	w_align = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
+	h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
 	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
 	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
 
 	/* set input/output strides and image sizes */
-	in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
-	in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
-	out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
-	out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+	in->pix.bytesperline = infmt->planar ?
+		clamp_align(in->pix.width, 2 << w_align, MAX_W, w_align) :
+		clamp_align((in->pix.width * infmt->bpp) >> 3,
+			    2 << w_align, MAX_W, w_align);
+	in->pix.sizeimage = infmt->planar ?
+		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
+		in->pix.height * in->pix.bytesperline;
+	out->pix.bytesperline = outfmt->planar ? out->pix.width :
+		(out->pix.width * outfmt->bpp) >> 3;
+	out->pix.sizeimage = outfmt->planar ?
+		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
+		out->pix.height * out->pix.bytesperline;
 }
 EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
 
@@ -1360,6 +1985,7 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
 	struct ipu_image_convert_chan *chan;
 	struct ipu_image_convert_ctx *ctx;
 	unsigned long flags;
+	unsigned int i;
 	bool get_res;
 	int ret;
 
@@ -1412,8 +2038,26 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
 	if (ret)
 		goto out_free;
 
+	ret = calc_image_resize_coefficients(ctx, in, out);
+	if (ret)
+		goto out_free;
+
 	calc_out_tile_map(ctx);
 
+	find_seams(ctx, s_image, d_image);
+
+	calc_tile_dimensions(ctx, s_image);
+	ret = calc_tile_offsets(ctx, s_image);
+	if (ret)
+		goto out_free;
+
+	calc_tile_dimensions(ctx, d_image);
+	ret = calc_tile_offsets(ctx, d_image);
+	if (ret)
+		goto out_free;
+
+	calc_tile_resize_coefficients(ctx);
+
 	dump_format(ctx, s_image);
 	dump_format(ctx, d_image);
 
@@ -1429,21 +2073,51 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
 	 * for every tile, and therefore would have to be updated for
 	 * each buffer which is not possible. So double-buffering is
 	 * impossible when either the source or destination images are
-	 * a planar format (YUV420, YUV422P, etc.).
+	 * a planar format (YUV420, YUV422P, etc.). Further, differently
+	 * sized tiles or different resizing coefficients per tile
+	 * prevent double-buffering as well.
 	 */
 	ctx->double_buffering = (ctx->num_tiles > 1 &&
 				 !s_image->fmt->planar &&
 				 !d_image->fmt->planar);
+	for (i = 1; i < ctx->num_tiles; i++) {
+		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
+		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
+		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
+		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
+			ctx->double_buffering = false;
+			break;
+		}
+	}
+	for (i = 1; i < ctx->in.num_cols; i++) {
+		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
+			ctx->double_buffering = false;
+			break;
+		}
+	}
+	for (i = 1; i < ctx->in.num_rows; i++) {
+		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
+			ctx->double_buffering = false;
+			break;
+		}
+	}
 
 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		unsigned long intermediate_size = d_image->tile[0].size;
+
+		for (i = 1; i < ctx->num_tiles; i++) {
+			if (d_image->tile[i].size > intermediate_size)
+				intermediate_size = d_image->tile[i].size;
+		}
+
 		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
-				    d_image->tile[0].size);
+				    intermediate_size);
 		if (ret)
 			goto out_free;
 		if (ctx->double_buffering) {
 			ret = alloc_dma_buf(priv,
 					    &ctx->rot_intermediate[1],
-					    d_image->tile[0].size);
+					    intermediate_size);
 			if (ret)
 				goto out_free_dmabuf0;
 		}
@@ -1524,16 +2198,13 @@ int ipu_image_convert_queue(struct ipu_image_convert_run *run)
 EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
 
 /* Abort any active or pending conversions for this context */
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
 {
 	struct ipu_image_convert_chan *chan = ctx->chan;
 	struct ipu_image_convert_priv *priv = chan->priv;
 	struct ipu_image_convert_run *run, *active_run, *tmp;
 	unsigned long flags;
 	int run_count, ret;
-	bool need_abort;
-
-	reinit_completion(&ctx->aborted);
 
 	spin_lock_irqsave(&chan->irqlock, flags);
 
@@ -1549,22 +2220,28 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
 	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
 		chan->current_run : NULL;
 
-	need_abort = (run_count || active_run);
+	if (active_run)
+		reinit_completion(&ctx->aborted);
 
-	ctx->aborting = need_abort;
+	ctx->aborting = true;
 
 	spin_unlock_irqrestore(&chan->irqlock, flags);
 
-	if (!need_abort) {
+	if (!run_count && !active_run) {
 		dev_dbg(priv->ipu->dev,
 			"%s: task %u: no abort needed for ctx %p\n",
 			__func__, chan->ic_task, ctx);
 		return;
 	}
 
+	if (!active_run) {
+		empty_done_q(chan);
+		return;
+	}
+
 	dev_dbg(priv->ipu->dev,
-		"%s: task %u: wait for completion: %d runs, active run %p\n",
-		__func__, chan->ic_task, run_count, active_run);
+		"%s: task %u: wait for completion: %d runs\n",
+		__func__, chan->ic_task, run_count);
 
 	ret = wait_for_completion_timeout(&ctx->aborted,
 					  msecs_to_jiffies(10000));
@@ -1572,7 +2249,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
 		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
 		force_abort(ctx);
 	}
+}
 
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+	__ipu_image_convert_abort(ctx);
 	ctx->aborting = false;
 }
 EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
@@ -1586,7 +2267,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
 	bool put_res;
 
 	/* make sure no runs are hanging around */
-	ipu_image_convert_abort(ctx);
+	__ipu_image_convert_abort(ctx);
 
 	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
 		chan->ic_task, ctx);
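
Taken together, the image-convert hunks above make double-buffering conditional on uniform tiling: every tile must share dimensions and resize coefficients, otherwise each tile needs its own channel setup (handled by ic_settings_changed() and the stop/restart in do_irq()). A condensed sketch of that rule as one hypothetical helper; tiles_are_uniform() is not part of the patch, but the fields are the ones shown in the hunks above:

/* Sketch only: folds the three eligibility loops from
 * ipu_image_convert_prepare() into a single predicate. */
static bool tiles_are_uniform(const struct ipu_image_convert_ctx *ctx)
{
	unsigned int i;

	for (i = 1; i < ctx->num_tiles; i++)
		if (ctx->in.tile[i].width   != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height  != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width  != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height)
			return false;

	for (i = 1; i < ctx->in.num_cols; i++)
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0])
			return false;

	for (i = 1; i < ctx->in.num_rows; i++)
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0])
			return false;

	return true;
}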
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c61b045..dc8e039 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -676,7 +676,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 	vga_arbiter_check_bridge_sharing(vgadev);
 
 	/* Add to the list */
-	list_add(&vgadev->list, &vga_list);
+	list_add_tail(&vgadev->list, &vga_list);
 	vga_count++;
 	vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
 		vga_iostate_to_str(vgadev->decodes),
@@ -1408,6 +1408,18 @@ static void __init vga_arb_select_default_device(void)
 	struct vga_device *vgadev;
 
 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
+	u64 base = screen_info.lfb_base;
+	u64 size = screen_info.lfb_size;
+	u64 limit;
+	resource_size_t start, end;
+	unsigned long flags;
+	int i;
+
+	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+		base |= (u64)screen_info.ext_lfb_base << 32;
+
+	limit = base + size;
+
 	list_for_each_entry(vgadev, &vga_list, list) {
 		struct device *dev = &vgadev->pdev->dev;
 		/*
@@ -1418,11 +1430,6 @@ static void __init vga_arb_select_default_device(void)
 		 * Select the device owning the boot framebuffer if there is
 		 * one.
 		 */
-		resource_size_t start, end, limit;
-		unsigned long flags;
-		int i;
-
-		limit = screen_info.lfb_base + screen_info.lfb_size;
 
 		/* Does firmware framebuffer belong to us? */
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1437,7 +1444,7 @@ static void __init vga_arb_select_default_device(void)
 			if (!start || !end)
 				continue;
 
-			if (screen_info.lfb_base < start || limit >= end)
+			if (base < start || limit >= end)
 				continue;
 
 			if (!vga_default_device())
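
The vgaarb change above extends the boot-framebuffer match to 64-bit addresses. A minimal sketch of the base computation, assuming only the screen_info fields the hunk itself uses (lfb_base_64() is illustrative):

#include <linux/screen_info.h>

/* Sketch: rebuild the full 64-bit framebuffer base the way the hunk does. */
static u64 lfb_base_64(const struct screen_info *si)
{
	u64 base = si->lfb_base;

	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (u64)si->ext_lfb_base << 32;

	return base;	/* the comparison limit is base + si->lfb_size */
}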
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f3899cc..4551bca 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -550,7 +550,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_
 	buffer[3] = 0;
 	buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
 
-	if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+	if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
 		v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
 		return;
 	}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 9eb7c70..9f99ef3 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2420,7 +2420,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index,
 		buffer[i + 3] = infoframe_read(sd,
 				       adv76xx_cri[index].payload_addr + i);
 
-	if (hdmi_infoframe_unpack(frame, buffer) < 0) {
+	if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
 		v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
 			 adv76xx_cri[index].desc);
 		return -ENOENT;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 4721d49..0e6384f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2574,7 +2574,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
 	for (i = 0; i < len; i++)
 		buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
 
-	if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+	if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
 		v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
 		return;
 	}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 41d470d..22cafc0 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -444,7 +444,7 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
 
 	i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
 
-	if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+	if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
 		v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
 		return;
 	}
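
These media/i2c conversions (adv7511, adv7604, adv7842, tc358743 above, and tda1997x below) are the same mechanical change: hdmi_infoframe_unpack() now takes the source buffer's size so the core can bounds-check before parsing. A hedged caller sketch; parse_rx_infoframe() and its read path are illustrative, not from the patch:

#include <linux/device.h>
#include <linux/hdmi.h>

static int parse_rx_infoframe(struct device *dev, const u8 *buf, size_t len)
{
	union hdmi_infoframe frame;
	int ret;

	/* the length travels with the buffer; the callee rejects short reads */
	ret = hdmi_infoframe_unpack(&frame, buf, len);
	if (ret < 0)
		return ret;

	hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
	return 0;
}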
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index c4c2a61..e8613e3 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1253,7 +1253,7 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
 
 	/* read data */
 	len = io_readn(sd, addr, sizeof(buffer), buffer);
-	err = hdmi_infoframe_unpack(&frame, buffer);
+	err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
 	if (err) {
 		v4l_err(state->client,
 			"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,7 +1928,7 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
 	/* read data */
 	len = io_readn(sd, addr, sizeof(buffer), buffer);
 	v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
-	err = hdmi_infoframe_unpack(&frame, buffer);
+	err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
 	if (err) {
 		v4l_err(state->client,
 			"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index 2570304..d3e23dd 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -279,7 +279,6 @@ static struct drm_driver driver = {
 	.gem_free_object_unlocked = vbox_gem_free_object,
 	.dumb_create = vbox_dumb_create,
 	.dumb_map_offset = vbox_dumb_mmap_offset,
-	.dumb_destroy = drm_gem_dumb_destroy,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 73395a75..fa933d4 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -99,8 +99,6 @@ struct vbox_private {
 	int fb_mtrr;
 
 	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 	} ttm;
 
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 5ecfa76..b36ec01 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -35,61 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
 	return container_of(bd, struct vbox_private, ttm.bdev);
 }
 
-static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
-{
-	return ttm_mem_global_init(ref->object);
-}
-
-static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
-{
-	ttm_mem_global_release(ref->object);
-}
-
-/**
- * Adds the vbox memory manager object/structures to the global memory manager.
- */
-static int vbox_ttm_global_init(struct vbox_private *vbox)
-{
-	struct drm_global_reference *global_ref;
-	int ret;
-
-	global_ref = &vbox->ttm.mem_global_ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-	global_ref->size = sizeof(struct ttm_mem_global);
-	global_ref->init = &vbox_ttm_mem_global_init;
-	global_ref->release = &vbox_ttm_mem_global_release;
-	ret = drm_global_item_ref(global_ref);
-	if (ret) {
-		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
-		return ret;
-	}
-
-	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
-	global_ref = &vbox->ttm.bo_global_ref.ref;
-	global_ref->global_type = DRM_GLOBAL_TTM_BO;
-	global_ref->size = sizeof(struct ttm_bo_global);
-	global_ref->init = &ttm_bo_global_init;
-	global_ref->release = &ttm_bo_global_release;
-
-	ret = drm_global_item_ref(global_ref);
-	if (ret) {
-		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&vbox->ttm.mem_global_ref);
-		return ret;
-	}
-
-	return 0;
-}
-
-/**
- * Removes the vbox memory manager object from the global memory manager.
- */
-static void vbox_ttm_global_release(struct vbox_private *vbox)
-{
-	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&vbox->ttm.mem_global_ref);
-}
-
 static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
 {
 	struct vbox_bo *bo;
@@ -227,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
 	struct drm_device *dev = &vbox->ddev;
 	struct ttm_bo_device *bdev = &vbox->ttm.bdev;
 
-	ret = vbox_ttm_global_init(vbox);
-	if (ret)
-		return ret;
-
 	ret = ttm_bo_device_init(&vbox->ttm.bdev,
-				 vbox->ttm.bo_global_ref.ref.object,
 				 &vbox_bo_driver,
 				 dev->anon_inode->i_mapping,
 				 DRM_FILE_PAGE_OFFSET, true);
 	if (ret) {
 		DRM_ERROR("Error initialising bo driver; %d\n", ret);
-		goto err_ttm_global_release;
+		return ret;
 	}
 
 	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -260,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
 
 err_device_release:
 	ttm_bo_device_release(&vbox->ttm.bdev);
-err_ttm_global_release:
-	vbox_ttm_global_release(vbox);
 	return ret;
 }
 
@@ -275,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
 	arch_phys_wc_del(vbox->fb_mtrr);
 #endif
 	ttm_bo_device_release(&vbox->ttm.bdev);
-	vbox_ttm_global_release(vbox);
 }
 
 void vbox_ttm_placement(struct vbox_bo *bo, int domain)
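
With the TTM global-state boilerplate gone, driver memory-manager setup collapses to the device init itself: ttm_bo_device_init() no longer takes a ttm_bo_global pointer. A sketch of the post-patch shape; example_mm_init() is illustrative, while vbox_bo_driver and available_vram_size are the driver pieces visible above:

/* Sketch: post-patch TTM setup, no drm_global_item_ref() plumbing. */
static int example_mm_init(struct vbox_private *vbox)
{
	struct drm_device *dev = &vbox->ddev;
	int ret;

	ret = ttm_bo_device_init(&vbox->ttm.bdev, &vbox_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret)
		return ret;

	return ttm_bo_init_mm(&vbox->ttm.bdev, TTM_PL_VRAM,
			      vbox->available_vram_size >> PAGE_SHIFT);
}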
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 8a3e8f6..799ae49 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -31,7 +31,7 @@
 
 #define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
 
-static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
+static u8 hdmi_infoframe_checksum(const u8 *ptr, size_t size)
 {
 	u8 csum = 0;
 	size_t i;
@@ -68,8 +68,36 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
 }
 EXPORT_SYMBOL(hdmi_avi_infoframe_init);
 
+static int hdmi_avi_infoframe_check_only(const struct hdmi_avi_infoframe *frame)
+{
+	if (frame->type != HDMI_INFOFRAME_TYPE_AVI ||
+	    frame->version != 2 ||
+	    frame->length != HDMI_AVI_INFOFRAME_SIZE)
+		return -EINVAL;
+
+	if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
- * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * hdmi_avi_infoframe_check() - check a HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame)
+{
+	return hdmi_avi_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_check);
+
+/**
+ * hdmi_avi_infoframe_pack_only() - write HDMI AVI infoframe to binary buffer
  * @frame: HDMI AVI infoframe
  * @buffer: destination buffer
  * @size: size of buffer
@@ -82,20 +110,22 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_init);
  * Returns the number of bytes packed into the binary buffer or a negative
  * error code on failure.
  */
-ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
-				size_t size)
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+				     void *buffer, size_t size)
 {
 	u8 *ptr = buffer;
 	size_t length;
+	int ret;
+
+	ret = hdmi_avi_infoframe_check_only(frame);
+	if (ret)
+		return ret;
 
 	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
 	if (size < length)
 		return -ENOSPC;
 
-	if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
-		return -EINVAL;
-
 	memset(buffer, 0, size);
 
 	ptr[0] = frame->type;
@@ -152,6 +182,36 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
 
 	return length;
 }
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack_only);
+
+/**
+ * hdmi_avi_infoframe_pack() - check a HDMI AVI infoframe,
+ *                             and write it to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame,
+				void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_avi_infoframe_check(frame);
+	if (ret)
+		return ret;
+
+	return hdmi_avi_infoframe_pack_only(frame, buffer, size);
+}
 EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
 
 /**
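
The AVI hunks above establish the pattern the rest of this file repeats for SPD, audio and vendor frames: *_check() validates a frame, *_pack_only() packs a const, pre-validated frame, and *_pack() keeps the old validate-then-pack behaviour. A small usage sketch; pack_avi() is illustrative:

#include <linux/hdmi.h>

static ssize_t pack_avi(u8 *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret)
		return ret;

	frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;

	/*
	 * pack() = check() + pack_only(); a caller that already validated
	 * the frame (e.g. at atomic-check time) may call pack_only() alone.
	 */
	return hdmi_avi_infoframe_pack(&frame, buf, size);
}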
@@ -178,8 +238,33 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
 }
 EXPORT_SYMBOL(hdmi_spd_infoframe_init);
 
+static int hdmi_spd_infoframe_check_only(const struct hdmi_spd_infoframe *frame)
+{
+	if (frame->type != HDMI_INFOFRAME_TYPE_SPD ||
+	    frame->version != 1 ||
+	    frame->length != HDMI_SPD_INFOFRAME_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
- * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * hdmi_spd_infoframe_check() - check a HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame)
+{
+	return hdmi_spd_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_check);
+
+/**
+ * hdmi_spd_infoframe_pack_only() - write HDMI SPD infoframe to binary buffer
  * @frame: HDMI SPD infoframe
  * @buffer: destination buffer
  * @size: size of buffer
@@ -192,11 +277,16 @@ EXPORT_SYMBOL(hdmi_spd_infoframe_init);
  * Returns the number of bytes packed into the binary buffer or a negative
  * error code on failure.
  */
-ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
-				size_t size)
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+				     void *buffer, size_t size)
 {
 	u8 *ptr = buffer;
 	size_t length;
+	int ret;
+
+	ret = hdmi_spd_infoframe_check_only(frame);
+	if (ret)
+		return ret;
 
 	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
@@ -222,6 +312,36 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
 
 	return length;
 }
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack_only);
+
+/**
+ * hdmi_spd_infoframe_pack() - check a HDMI SPD infoframe,
+ *                             and write it to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame,
+				void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_spd_infoframe_check(frame);
+	if (ret)
+		return ret;
+
+	return hdmi_spd_infoframe_pack_only(frame, buffer, size);
+}
 EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
 
 /**
@@ -242,8 +362,33 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
 }
 EXPORT_SYMBOL(hdmi_audio_infoframe_init);
 
+static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *frame)
+{
+	if (frame->type != HDMI_INFOFRAME_TYPE_AUDIO ||
+	    frame->version != 1 ||
+	    frame->length != HDMI_AUDIO_INFOFRAME_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
- * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * hdmi_audio_infoframe_check() - check a HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame)
+{
+	return hdmi_audio_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+
+/**
+ * hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
  * @frame: HDMI audio infoframe
  * @buffer: destination buffer
  * @size: size of buffer
@@ -256,12 +401,17 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_init);
  * Returns the number of bytes packed into the binary buffer or a negative
  * error code on failure.
  */
-ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
-				  void *buffer, size_t size)
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+				       void *buffer, size_t size)
 {
 	unsigned char channels;
 	u8 *ptr = buffer;
 	size_t length;
+	int ret;
+
+	ret = hdmi_audio_infoframe_check_only(frame);
+	if (ret)
+		return ret;
 
 	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
@@ -297,6 +447,36 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
 
 	return length;
 }
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_only);
+
+/**
+ * hdmi_audio_infoframe_pack() - check a HDMI Audio infoframe,
+ *                               and write it to binary buffer
+ * @frame: HDMI Audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+				  void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_audio_infoframe_check(frame);
+	if (ret)
+		return ret;
+
+	return hdmi_audio_infoframe_pack_only(frame, buffer, size);
+}
 EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
 
 /**
@@ -319,6 +499,7 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
 	 * value
 	 */
 	frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+	frame->length = 4;
 
 	return 0;
 }
@@ -335,8 +516,42 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
 		return 4;
 }
 
+static int hdmi_vendor_infoframe_check_only(const struct hdmi_vendor_infoframe *frame)
+{
+	if (frame->type != HDMI_INFOFRAME_TYPE_VENDOR ||
+	    frame->version != 1 ||
+	    frame->oui != HDMI_IEEE_OUI)
+		return -EINVAL;
+
+	/* only one of those can be supplied */
+	if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+		return -EINVAL;
+
+	if (frame->length != hdmi_vendor_infoframe_length(frame))
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
- * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * hdmi_vendor_infoframe_check() - check a HDMI vendor infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame)
+{
+	frame->length = hdmi_vendor_infoframe_length(frame);
+
+	return hdmi_vendor_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_check);
+
+/**
+ * hdmi_vendor_infoframe_pack_only() - write a HDMI vendor infoframe to binary buffer
  * @frame: HDMI infoframe
  * @buffer: destination buffer
  * @size: size of buffer
@@ -349,17 +564,16 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
  * Returns the number of bytes packed into the binary buffer or a negative
  * error code on failure.
  */
-ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
-				 void *buffer, size_t size)
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+					void *buffer, size_t size)
 {
 	u8 *ptr = buffer;
 	size_t length;
+	int ret;
 
-	/* only one of those can be supplied */
-	if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
-		return -EINVAL;
-
-	frame->length = hdmi_vendor_infoframe_length(frame);
+	ret = hdmi_vendor_infoframe_check_only(frame);
+	if (ret)
+		return ret;
 
 	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
@@ -394,24 +608,134 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
 
 	return length;
 }
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack_only);
+
+/**
+ * hdmi_vendor_infoframe_pack() - check a HDMI Vendor infoframe,
+ *                                and write it to binary buffer
+ * @frame: HDMI Vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+				   void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_vendor_infoframe_check(frame);
+	if (ret)
+		return ret;
+
+	return hdmi_vendor_infoframe_pack_only(frame, buffer, size);
+}
 EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
 
-/*
- * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
- */
-static ssize_t
-hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
-			   void *buffer, size_t size)
+static int
+hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *frame)
 {
+	if (frame->any.type != HDMI_INFOFRAME_TYPE_VENDOR ||
+	    frame->any.version != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * hdmi_vendor_any_infoframe_check() - check a vendor infoframe
+ */
+static int
+hdmi_vendor_any_infoframe_check(union hdmi_vendor_any_infoframe *frame)
+{
+	int ret;
+
+	ret = hdmi_vendor_any_infoframe_check_only(frame);
+	if (ret)
+		return ret;
+
 	/* we only know about HDMI vendor infoframes */
 	if (frame->any.oui != HDMI_IEEE_OUI)
 		return -EINVAL;
 
-	return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+	return hdmi_vendor_infoframe_check(&frame->hdmi);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack_only() - write a vendor infoframe to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack_only(const union hdmi_vendor_any_infoframe *frame,
+				    void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_vendor_any_infoframe_check_only(frame);
+	if (ret)
+		return ret;
+
+	/* we only know about HDMI vendor infoframes */
+	if (frame->any.oui != HDMI_IEEE_OUI)
+		return -EINVAL;
+
+	return hdmi_vendor_infoframe_pack_only(&frame->hdmi, buffer, size);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - check a vendor infoframe,
+ *                                    and write it to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+			       void *buffer, size_t size)
+{
+	int ret;
+
+	ret = hdmi_vendor_any_infoframe_check(frame);
+	if (ret)
+		return ret;
+
+	return hdmi_vendor_any_infoframe_pack_only(frame, buffer, size);
 }
 
 /**
- * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * hdmi_infoframe_check() - check a HDMI infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+hdmi_infoframe_check(union hdmi_infoframe *frame)
+{
+	switch (frame->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		return hdmi_avi_infoframe_check(&frame->avi);
+	case HDMI_INFOFRAME_TYPE_SPD:
+		return hdmi_spd_infoframe_check(&frame->spd);
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		return hdmi_audio_infoframe_check(&frame->audio);
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		return hdmi_vendor_any_infoframe_check(&frame->vendor);
+	default:
+		WARN(1, "Bad infoframe type %d\n", frame->any.type);
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(hdmi_infoframe_check);
+
+/**
+ * hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
  * @frame: HDMI infoframe
  * @buffer: destination buffer
  * @size: size of buffer
@@ -425,7 +749,56 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
  * error code on failure.
  */
 ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+	ssize_t length;
+
+	switch (frame->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		length = hdmi_avi_infoframe_pack_only(&frame->avi,
+						      buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_SPD:
+		length = hdmi_spd_infoframe_pack_only(&frame->spd,
+						      buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		length = hdmi_audio_infoframe_pack_only(&frame->audio,
+							buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		length = hdmi_vendor_any_infoframe_pack_only(&frame->vendor,
+							     buffer, size);
+		break;
+	default:
+		WARN(1, "Bad infoframe type %d\n", frame->any.type);
+		length = -EINVAL;
+	}
+
+	return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack_only);
+
+/**
+ * hdmi_infoframe_pack() - check a HDMI infoframe,
+ *                         and write it to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame,
+		    void *buffer, size_t size)
 {
 	ssize_t length;
 
@@ -471,7 +844,7 @@ static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
 
 static void hdmi_infoframe_log_header(const char *level,
 				      struct device *dev,
-				      struct hdmi_any_infoframe *frame)
+				      const struct hdmi_any_infoframe *frame)
 {
 	hdmi_log("HDMI infoframe: %s, version %u, length %u\n",
 		hdmi_infoframe_type_get_name(frame->type),
@@ -673,10 +1046,10 @@ hdmi_content_type_get_name(enum hdmi_content_type content_type)
  */
 static void hdmi_avi_infoframe_log(const char *level,
 				   struct device *dev,
-				   struct hdmi_avi_infoframe *frame)
+				   const struct hdmi_avi_infoframe *frame)
 {
 	hdmi_infoframe_log_header(level, dev,
-				  (struct hdmi_any_infoframe *)frame);
+				  (const struct hdmi_any_infoframe *)frame);
 
 	hdmi_log("    colorspace: %s\n",
 			hdmi_colorspace_get_name(frame->colorspace));
@@ -750,12 +1123,12 @@ static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
  */
 static void hdmi_spd_infoframe_log(const char *level,
 				   struct device *dev,
-				   struct hdmi_spd_infoframe *frame)
+				   const struct hdmi_spd_infoframe *frame)
 {
 	u8 buf[17];
 
 	hdmi_infoframe_log_header(level, dev,
-				  (struct hdmi_any_infoframe *)frame);
+				  (const struct hdmi_any_infoframe *)frame);
 
 	memset(buf, 0, sizeof(buf));
 
@@ -886,10 +1259,10 @@ hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
  */
 static void hdmi_audio_infoframe_log(const char *level,
 				     struct device *dev,
-				     struct hdmi_audio_infoframe *frame)
+				     const struct hdmi_audio_infoframe *frame)
 {
 	hdmi_infoframe_log_header(level, dev,
-				  (struct hdmi_any_infoframe *)frame);
+				  (const struct hdmi_any_infoframe *)frame);
 
 	if (frame->channels)
 		hdmi_log("    channels: %u\n", frame->channels - 1);
@@ -949,12 +1322,12 @@ hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
 static void
 hdmi_vendor_any_infoframe_log(const char *level,
 			      struct device *dev,
-			      union hdmi_vendor_any_infoframe *frame)
+			      const union hdmi_vendor_any_infoframe *frame)
 {
-	struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+	const struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
 
 	hdmi_infoframe_log_header(level, dev,
-				  (struct hdmi_any_infoframe *)frame);
+				  (const struct hdmi_any_infoframe *)frame);
 
 	if (frame->any.oui != HDMI_IEEE_OUI) {
 		hdmi_log("    not a HDMI vendor infoframe\n");
@@ -984,7 +1357,7 @@ hdmi_vendor_any_infoframe_log(const char *level,
  */
 void hdmi_infoframe_log(const char *level,
 			struct device *dev,
-			union hdmi_infoframe *frame)
+			const union hdmi_infoframe *frame)
 {
 	switch (frame->any.type) {
 	case HDMI_INFOFRAME_TYPE_AVI:
@@ -1005,8 +1378,9 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
 
 /**
  * hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe
- * @buffer: source buffer
  * @frame: HDMI AVI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
  *
  * Unpacks the information contained in binary @buffer into a structured
  * @frame of the HDMI Auxiliary Video (AVI) information frame.
@@ -1016,11 +1390,14 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
  * Returns 0 on success or a negative error code on failure.
  */
 static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
-				     void *buffer)
+				     const void *buffer, size_t size)
 {
-	u8 *ptr = buffer;
+	const u8 *ptr = buffer;
 	int ret;
 
+	if (size < HDMI_INFOFRAME_SIZE(AVI))
+		return -EINVAL;
+
 	if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
 	    ptr[1] != 2 ||
 	    ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
@@ -1068,8 +1445,9 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
 
 /**
  * hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
- * @buffer: source buffer
  * @frame: HDMI SPD infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
  *
  * Unpacks the information contained in binary @buffer into a structured
  * @frame of the HDMI Source Product Description (SPD) information frame.
@@ -1079,11 +1457,14 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
  * Returns 0 on success or a negative error code on failure.
  */
 static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
-				     void *buffer)
+				     const void *buffer, size_t size)
 {
-	u8 *ptr = buffer;
+	const u8 *ptr = buffer;
 	int ret;
 
+	if (size < HDMI_INFOFRAME_SIZE(SPD))
+		return -EINVAL;
+
 	if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
 	    ptr[1] != 1 ||
 	    ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
@@ -1106,8 +1487,9 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
 
 /**
  * hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
- * @buffer: source buffer
  * @frame: HDMI Audio infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
  *
  * Unpacks the information contained in binary @buffer into a structured
  * @frame of the HDMI Audio information frame.
@@ -1117,11 +1499,14 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
  * Returns 0 on success or a negative error code on failure.
  */
 static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
-				       void *buffer)
+				       const void *buffer, size_t size)
 {
-	u8 *ptr = buffer;
+	const u8 *ptr = buffer;
 	int ret;
 
+	if (size < HDMI_INFOFRAME_SIZE(AUDIO))
+		return -EINVAL;
+
 	if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
 	    ptr[1] != 1 ||
 	    ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
@@ -1151,8 +1536,9 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
 
 /**
  * hdmi_vendor_infoframe_unpack() - unpack binary buffer to a HDMI vendor infoframe
- * @buffer: source buffer
  * @frame: HDMI Vendor infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
  *
  * Unpacks the information contained in binary @buffer into a structured
  * @frame of the HDMI Vendor information frame.
@@ -1163,14 +1549,17 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
  */
 static int
 hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
-				 void *buffer)
+				 const void *buffer, size_t size)
 {
-	u8 *ptr = buffer;
+	const u8 *ptr = buffer;
 	size_t length;
 	int ret;
 	u8 hdmi_video_format;
 	struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
 
+	if (size < HDMI_INFOFRAME_HEADER_SIZE)
+		return -EINVAL;
+
 	if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
 	    ptr[1] != 1 ||
 	    (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
@@ -1178,6 +1567,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
 
 	length = ptr[2];
 
+	if (size < HDMI_INFOFRAME_HEADER_SIZE + length)
+		return -EINVAL;
+
 	if (hdmi_infoframe_checksum(buffer,
 				    HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
 		return -EINVAL;
@@ -1224,8 +1616,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
 
 /**
  * hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
- * @buffer: source buffer
  * @frame: HDMI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
  *
  * Unpacks the information contained in binary buffer @buffer into a structured
  * @frame of a HDMI infoframe.
@@ -1234,23 +1627,27 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
  *
  * Returns 0 on success or a negative error code on failure.
  */
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer)
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+			  const void *buffer, size_t size)
 {
 	int ret;
-	u8 *ptr = buffer;
+	const u8 *ptr = buffer;
+
+	if (size < HDMI_INFOFRAME_HEADER_SIZE)
+		return -EINVAL;
 
 	switch (ptr[0]) {
 	case HDMI_INFOFRAME_TYPE_AVI:
-		ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer);
+		ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size);
 		break;
 	case HDMI_INFOFRAME_TYPE_SPD:
-		ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer);
+		ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size);
 		break;
 	case HDMI_INFOFRAME_TYPE_AUDIO:
-		ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer);
+		ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer, size);
 		break;
 	case HDMI_INFOFRAME_TYPE_VENDOR:
-		ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer);
+		ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer, size);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252a..bb71db6 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
 }
 EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
 
-int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
+int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr)
 {
 	int err = 0;
 	int i;
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
 	return ret;
 }
 
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
 {
 	int i;
 	for (i = 0; ptr[i]; i++)
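
With the const-qualified signatures above, a NULL-terminated attribute array can now be fully const and live in rodata. A sketch; the dev_attr_* entries are hypothetical attributes declared elsewhere with DEVICE_ATTR_RO() and friends:

#include <linux/sysfs.h>

static const struct attribute * const my_attrs[] = {
	&dev_attr_foo.attr,	/* hypothetical */
	&dev_attr_bar.attr,	/* hypothetical */
	NULL,
};

/* sysfs_create_files(kobj, my_attrs) now compiles without a cast */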
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ccb5aa8..9c56412 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -133,6 +133,7 @@ struct dw_hdmi_plat_data {
 	const struct dw_hdmi_phy_ops *phy_ops;
 	const char *phy_name;
 	void *phy_data;
+	unsigned int phy_force_vendor;
 
 	/* Synopsys PHY support */
 	const struct dw_hdmi_mpll_config *mpll_cfg;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index d9c6d54..48a671e7 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops {
 			     unsigned int *lane_mbps);
 };
 
+struct dw_mipi_dsi_host_ops {
+	int (*attach)(void *priv_data,
+		      struct mipi_dsi_device *dsi);
+	int (*detach)(void *priv_data,
+		      struct mipi_dsi_device *dsi);
+};
+
 struct dw_mipi_dsi_plat_data {
 	void __iomem *base;
 	unsigned int max_data_lanes;
@@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data {
 					   const struct drm_display_mode *mode);
 
 	const struct dw_mipi_dsi_phy_ops *phy_ops;
+	const struct dw_mipi_dsi_host_ops *host_ops;
 
 	void *priv_data;
 };
@@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
 				      const struct dw_mipi_dsi_plat_data
 				      *plat_data);
 void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
-struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
-				     struct drm_encoder *encoder,
-				     const struct dw_mipi_dsi_plat_data
-				     *plat_data);
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
 void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
 
 #endif /* __DW_MIPI_DSI__ */
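
The dw-mipi-dsi header change splits creation from binding and adds dual-link support. A glue-driver sketch under those signatures; error handling is trimmed, and slave_dsi only exists on dual-DSI hardware:

	struct dw_mipi_dsi *dsi;
	int ret;

	dsi = dw_mipi_dsi_probe(pdev, &plat_data);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	if (slave_dsi)
		dw_mipi_dsi_set_slave(dsi, slave_dsi);	/* dual-link case */

	ret = dw_mipi_dsi_bind(dsi, encoder);	/* attach to the DRM encoder */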
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0535042..bdb0d55 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_os_linux.h>
@@ -110,4 +109,10 @@ static inline bool drm_can_sleep(void)
 	return true;
 }
 
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
 #endif
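
EXPORT_SYMBOL_FOR_TESTS_ONLY() expands to a real export only when the selftests are built as a module, so internal symbols stay private in production builds. A minimal sketch with a hypothetical helper:

/* hypothetical internal helper, visible to selftest modules only */
int drm_internal_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_helper);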
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1e810e0..f9b3583 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -265,7 +265,6 @@ struct __drm_private_objs_state {
  * struct drm_atomic_state - the global state object for atomic updates
  * @ref: count of all references to this state (will not be freed until zero)
  * @dev: parent DRM device
- * @allow_modeset: allow full modeset
  * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
  * @async_update: hint for asynchronous plane update
  * @planes: pointer to array of structures with per-plane data
@@ -284,6 +283,15 @@ struct drm_atomic_state {
 	struct kref ref;
 
 	struct drm_device *dev;
+
+	/**
+	 * @allow_modeset:
+	 *
+	 * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+	 * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+	 * never consult this flag, instead looking at the output of
+	 * drm_atomic_crtc_needs_modeset().
+	 */
 	bool allow_modeset : 1;
 	bool legacy_cursor_update : 1;
 	bool async_update : 1;
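
Per the relocated @allow_modeset kerneldoc above, drivers should key their validation off drm_atomic_crtc_needs_modeset() rather than reading the flag. A sketch; my_crtc_atomic_check() is illustrative:

#include <drm/drm_atomic.h>

static int my_crtc_atomic_check(struct drm_crtc *crtc,
				struct drm_crtc_state *crtc_state)
{
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		/* validation that only applies to a full modeset */
	}

	return 0;
}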
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 657af7b..58214be 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_util.h>
 
 struct drm_atomic_state;
@@ -126,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 int drm_atomic_helper_disable_all(struct drm_device *dev,
 				  struct drm_modeset_acquire_ctx *ctx);
 void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx);
 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 					      struct drm_modeset_acquire_ctx *ctx);
@@ -144,51 +148,10 @@ int drm_atomic_helper_page_flip_target(
 				uint32_t flags,
 				uint32_t target,
 				struct drm_modeset_acquire_ctx *ctx);
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector);
-
-/* default implementations for state handling */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
-					      struct drm_crtc_state *state);
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
-					  struct drm_crtc_state *state);
-
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
-				     struct drm_plane_state *state);
-void drm_atomic_helper_plane_reset(struct drm_plane *plane);
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
-					       struct drm_plane_state *state);
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
-					  struct drm_plane_state *state);
-
-void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
-					 struct drm_connector_state *conn_state);
-void drm_atomic_helper_connector_reset(struct drm_connector *connector);
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
-					   struct drm_connector_state *state);
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
-				  struct drm_modeset_acquire_ctx *ctx);
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
-					  struct drm_connector_state *state);
 int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 				       u16 *red, u16 *green, u16 *blue,
 				       uint32_t size,
 				       struct drm_modeset_acquire_ctx *ctx);
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
-						     struct drm_private_state *state);
 
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
new file mode 100644
index 0000000..66c92cbd
--- /dev/null
+++ b/include/drm/drm_atomic_state_helper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_private_obj;
+struct drm_private_state;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+					      struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state);
+
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+				     struct drm_plane_state *state);
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+					       struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					  struct drm_plane_state *state);
+
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+					 struct drm_connector_state *conn_state);
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+					   struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					  struct drm_connector_state *state);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+						     struct drm_private_state *state);
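
The new drm_atomic_state_helper.h carries only the default state handlers; drivers wire them into their funcs tables exactly as before, with the include adjusted. A sketch:

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_plane.h>

static const struct drm_plane_funcs my_plane_funcs = {
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	/* remaining hooks omitted for brevity */
};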
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 9ccad6b..9be2181 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -508,6 +508,18 @@ struct drm_connector_state {
 	 * drm_writeback_signal_completion()
 	 */
 	struct drm_writeback_job *writeback_job;
+
+	/**
+	 * @max_requested_bpc: Connector property to limit the maximum bit
+	 * depth of the pixels.
+	 */
+	u8 max_requested_bpc;
+
+	/**
+	 * @max_bpc: Connector max_bpc based on the requested max_bpc property
+	 * and the connector bpc limitations obtained from edid.
+	 */
+	u8 max_bpc;
 };
 
 /**
@@ -960,6 +972,17 @@ struct drm_connector {
 	struct drm_property *scaling_mode_property;
 
 	/**
+	 * @vrr_capable_property: Optional property to help userspace
+	 * query hardware support for variable refresh rate on a connector.
+	 * Drivers can add the property to a connector by
+	 * calling drm_connector_attach_vrr_capable_property().
+	 *
+	 * This should be updated only by calling
+	 * drm_connector_set_vrr_capable_property().
+	 */
+	struct drm_property *vrr_capable_property;
+
+	/**
 	 * @content_protection_property: DRM ENUM property for content
 	 * protection. See drm_connector_attach_content_protection_property().
 	 */
@@ -973,6 +996,12 @@ struct drm_connector {
 	 */
 	struct drm_property_blob *path_blob_ptr;
 
+	/**
+	 * @max_bpc_property: Default connector property for the max bpc to be
+	 * driven out of the connector.
+	 */
+	struct drm_property *max_bpc_property;
+
 #define DRM_CONNECTOR_POLL_HPD (1 << 0)
 #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
 #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1133,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev,
 		       struct drm_connector *connector,
 		       const struct drm_connector_funcs *funcs,
 		       int connector_type);
+void drm_connector_attach_edid_property(struct drm_connector *connector);
 int drm_connector_register(struct drm_connector *connector);
 void drm_connector_unregister(struct drm_connector *connector);
 int drm_connector_attach_encoder(struct drm_connector *connector,
@@ -1192,30 +1222,6 @@ static inline void drm_connector_put(struct drm_connector *connector)
 }
 
 /**
- * drm_connector_reference - acquire a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_get() and should not be
- * used by new code.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
-	drm_connector_get(connector);
-}
-
-/**
- * drm_connector_unreference - release a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_put() and should not be
- * used by new code.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
-	drm_connector_put(connector);
-}
-
-/**
  * drm_connector_is_unregistered - has the connector been unregistered from
  * userspace?
  * @connector: DRM connector
@@ -1250,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 int drm_connector_attach_content_type_property(struct drm_connector *dev);
 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 					       u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+		struct drm_connector *connector);
 int drm_connector_attach_content_protection_property(
 		struct drm_connector *connector);
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1266,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
 				       const struct edid *edid);
 void drm_connector_set_link_status_property(struct drm_connector *connector,
 					    uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+		struct drm_connector *connector, bool capable);
 int drm_connector_init_panel_orientation_property(
 	struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+					  int min, int max);
 
 /**
  * struct drm_tile_group - Tile group metadata
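
Together, the new attach/set helpers above give a typical init and hotplug flow like the following hedged sketch (foo_* names and the 8..12 bpc range are illustrative):

static int foo_connector_init(struct drm_connector *connector)
{
	int ret;

	/* Let userspace cap the bit depth; the range is driver specific. */
	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;

	return drm_connector_attach_vrr_capable_property(connector);
}

static void foo_connector_update_sink_caps(struct drm_connector *connector,
					   bool sink_supports_vrr)
{
	/* Only updates the property value, safe to call on hotplug. */
	drm_connector_set_vrr_capable_property(connector,
					       sink_supports_vrr);
}
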
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b21437b..39c3900 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
 	u32 pageflip_flags;
 
 	/**
+	 * @vrr_enabled:
+	 *
+	 * Indicates if variable refresh rate should be enabled for the CRTC.
+	 * Support for the requested vrr state will depend on driver and
+	 * hardware capability - lacking support is not treated as failure.
+	 */
+	bool vrr_enabled;
+
+	/**
 	 * @event:
 	 *
 	 * Optional pointer to a DRM event to signal upon completion of the
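
Drivers with hardware support consult the flag at commit time; everyone else can ignore it, since lacking support is not a failure. A sketch (the foo_hw_* calls are placeholders):

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_state)
{
	/*
	 * vrr_enabled is a request, not a guarantee: userspace is
	 * expected to check the connector's vrr_capable property first.
	 */
	if (crtc->state->vrr_enabled)
		foo_hw_enable_vrr(crtc);
	else
		foo_hw_disable_vrr(crtc);
}
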
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 6914633..d65f034 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 void drm_helper_resume_force_mode(struct drm_device *dev);
 
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode, int x, int y,
-			     struct drm_framebuffer *old_fb);
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-				  struct drm_framebuffer *old_fb);
-
 /* drm_probe_helper.c */
 int drm_helper_probe_single_connector_modes(struct drm_connector
 					    *connector, uint32_t maxX,
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 0000000..4487660
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Return a rectangle in fb coordinate clipped to plane src.
+ *
+ * Note that if the first call to the iterator macro returns false then no
+ * plane update is needed. The iterator returns the full plane src when damage
+ * is not passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+	while (drm_atomic_helper_damage_iter_next(iter, rect))
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+	/* private: Plane src in whole pixels, not 16.16 fixed point. */
+	struct drm_rect plane_src;
+	/* private: Rectangles in plane damage blob. */
+	const struct drm_rect *clips;
+	/* private: Number of rectangles in plane damage blob. */
+	uint32_t num_clips;
+	/* private: Current clip iterator is advancing on. */
+	uint32_t curr_clip;
+	/* private: Whether a full plane update is needed. */
+	bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+					  struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+			      struct drm_file *file_priv, unsigned int flags,
+			      unsigned int color, struct drm_clip_rect *clips,
+			      unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+				   const struct drm_plane_state *old_state,
+				   const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+				   struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
+ * can be obtained by simply typecasting &drm_mode_rect, since both use signed
+ * 32-bit fields and drm_atomic_check_only() verifies that the damage clips
+ * are inside the fb.
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+	return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
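
A driver opts in by calling drm_plane_enable_fb_damage_clips() at plane init and then walks the clips from its atomic update hook; if userspace sent no damage the iterator hands back the full plane src, as documented above. A sketch (foo_flush_rect() is a placeholder for the hardware flush):

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *new_state = plane->state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* clip is in fb coordinates, already clipped to src. */
		foo_flush_rect(new_state->fb, &clip);
	}
}
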
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a3843f..5736c94 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -231,6 +231,8 @@
 #define DP_DSC_MAX_BITS_PER_PIXEL_LOW       0x067   /* eDP 1.4 */
 
 #define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
 
 #define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
 # define DP_DSC_RGB                         (1 << 0)
@@ -279,6 +281,8 @@
 # define DP_DSC_THROUGHPUT_MODE_1_1000      (14 << 4)
 
 #define DP_DSC_MAX_SLICE_WIDTH              0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE        2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER       320
 
 #define DP_DSC_SLICE_CAP_2                  0x06D
 # define DP_DSC_16_PER_DP_DSC_SINK          (1 << 0)
@@ -477,6 +481,7 @@
 # define DP_AUX_FRAME_SYNC_VALID	    (1 << 0)
 
 #define DP_DSC_ENABLE                       0x160   /* DP 1.4 */
+# define DP_DECOMPRESSION_EN                (1 << 0)
 
 #define DP_PSR_EN_CFG			    0x170   /* XXX 1.2? */
 # define DP_PSR_ENABLE			    (1 << 0)
@@ -685,6 +690,8 @@
 # define DP_EDP_12			    0x01
 # define DP_EDP_13			    0x02
 # define DP_EDP_14			    0x03
+# define DP_EDP_14a                         0x04    /* eDP 1.4a */
+# define DP_EDP_14b                         0x05    /* eDP 1.4b */
 
 #define DP_EDP_GENERAL_CAP_1		    0x701
 # define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP		(1 << 0)
@@ -905,6 +912,57 @@
 #define DP_AUX_HDCP_KSV_FIFO		0x6802C
 #define DP_AUX_HDCP_AINFO		0x6803B
 
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET		0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET		0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET		0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET		0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET		0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET		0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET	0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET		0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET		0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET	0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET		0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET		0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET		0x69318
+#define	DP_HDCP_2_2_REG_RIV_OFFSET		0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET		0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET	0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET		0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET	0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET		0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET	0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET		0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET	0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET		0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET		0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET	0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET		0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET		DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET	DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET	DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET	DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET	DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+						DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET		DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET	DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET		DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET	DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET		DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET	DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET	DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN		1
+#define HDCP_2_2_DP_RXSTATUS_READY(x)		((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x)		((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x)		((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x)	((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x)	((x) & BIT(4))
+
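
These are ordinary DPCD addresses, so the existing AUX helpers apply unchanged. Polling RxStatus might look like this fragment (error handling trimmed; foo_hdcp2_reauth() is hypothetical, aux is the port's &drm_dp_aux):

	u8 rx_status;

	drm_dp_dpcd_read(aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET,
			 &rx_status, HDCP_2_2_DP_RXSTATUS_LEN);

	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		foo_hdcp2_reauth(connector);	/* sink wants re-auth */
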
 /* DP 1.2 Sideband message defines */
 /* peer device type - DP 1.2a Table 2-92 */
 #define DP_PEER_DEVICE_NONE		0x0
@@ -963,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
 
 #define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
+#define DP_DSC_RECEIVER_CAP_SIZE        0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 #define EDP_DISPLAY_CTL_CAP_SIZE	3
 
@@ -993,6 +1052,7 @@ struct dp_sdp_header {
 
 #define EDP_SDP_HEADER_REVISION_MASK		0x1F
 #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES	0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
 
 struct edp_vsc_psr {
 	struct dp_sdp_header sdp_header;
@@ -1059,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 	return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
 }
 
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+				   bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+					 u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+		DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+		(dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+		 DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+		 DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	/* Max Slicewidth = Number of Pixels * 320 */
+	return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+		DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+	return fec_capable & DP_FEC_CAPABLE;
+}
+
 /*
  * DisplayPort AUX channel
  */
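
A source driver typically caches the DSC capability block once during sink probing and feeds it to these helpers. A fragment (aux is the port's &drm_dp_aux; error handling trimmed):

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
	u8 dsc_bpc[3];
	u8 max_slices;
	int num_bpc;

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) < 0)
		return;

	if (!drm_dp_sink_supports_dsc(dsc_dpcd))
		return;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
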
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26..59f005b 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
 struct drm_dp_mst_topology_state {
 	struct drm_private_state base;
 	int avail_slots;
-	struct drm_atomic_state *state;
 	struct drm_dp_mst_topology_mgr *mgr;
 };
 
@@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr {
 	int pbn_div;
 
 	/**
-	 * @state: State information for topology manager
-	 */
-	struct drm_dp_mst_topology_state *state;
-
-	/**
 	 * @funcs: Atomic helper callbacks
 	 */
 	const struct drm_private_state_funcs *funcs;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 3199ef7..35af23f 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -471,6 +471,8 @@ struct drm_driver {
 	 * @gem_prime_export:
 	 *
 	 * export GEM -> dmabuf
+	 *
+	 * This defaults to drm_gem_prime_export() if not set.
 	 */
 	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
 				struct drm_gem_object *obj, int flags);
@@ -478,6 +480,8 @@ struct drm_driver {
 	 * @gem_prime_import:
 	 *
 	 * import dmabuf -> GEM
+	 *
+	 * This defaults to drm_gem_prime_import() if not set.
 	 */
 	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
 				struct dma_buf *dma_buf);
@@ -523,8 +527,10 @@ struct drm_driver {
 	 * @dumb_map_offset:
 	 *
 	 * Allocate an offset in the drm device node's address space to be able to
-	 * memory map a dumb buffer. GEM-based drivers must use
-	 * drm_gem_create_mmap_offset() to implement this.
+	 * memory map a dumb buffer.
+	 *
+	 * The default implementation is drm_gem_create_mmap_offset(). GEM based
+	 * drivers must not overwrite this.
 	 *
 	 * Called by the user via ioctl.
 	 *
@@ -544,6 +550,9 @@ struct drm_driver {
 	 *
 	 * Called by the user via ioctl.
 	 *
+	 * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
+	 * must not overwrite this.
+	 *
 	 * Returns:
 	 *
 	 * Zero on success, negative errno on failure.
@@ -621,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev);
 
 void drm_dev_get(struct drm_device *dev);
 void drm_dev_put(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
 void drm_put_dev(struct drm_device *dev);
 bool drm_dev_enter(struct drm_device *dev, int *idx);
 void drm_dev_exit(int idx);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 0000000..d03f1b8
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES			15
+#define DSC_MUX_WORD_SIZE_8_10_BPC		48
+#define DSC_MUX_WORD_SIZE_12_BPC		64
+#define DSC_RC_PIXELS_PER_GROUP			3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX	4095
+#define DSC_RANGE_BPG_OFFSET_MASK		0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST		    8192
+#define DSC_RC_EDGE_FACTOR_CONST	    6
+#define DSC_RC_TGT_OFFSET_HI_CONST	    3
+#define DSC_RC_TGT_OFFSET_LO_CONST	    3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT		4
+#define DSC_PPS_BPC_SHIFT			4
+#define DSC_PPS_MSB_SHIFT			8
+#define DSC_PPS_LSB_MASK			(0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK			(0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT			2
+#define DSC_PPS_SIMPLE422_SHIFT			3
+#define DSC_PPS_CONVERT_RGB_SHIFT		4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT		5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK	(0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK		(0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT		4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT		11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT		6
+#define DSC_PPS_NATIVE_420_SHIFT		1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS		16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL		0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS		13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+	/* Min Quantization Parameters allowed for this range */
+	u8 range_min_qp;
+	/* Max Quantization Parameters allowed for this range */
+	u8 range_max_qp;
+	/* Bits/group offset to apply to target for this group */
+	u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+	/* Bits / component for previous reconstructed line buffer */
+	u8 line_buf_depth;
+	/* Bits per component to code (must be 8, 10, or 12) */
+	u8 bits_per_component;
+	/*
+	 * Flag indicating whether to do RGB to YCoCg conversion
+	 * and back (should be 1 for RGB input)
+	 */
+	bool convert_rgb;
+	u8 slice_count;
+	/* Slice Width */
+	u16 slice_width;
+	/* Slice Height */
+	u16 slice_height;
+	/*
+	 * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+	 * outside of DSC encode/decode algorithm)
+	 */
+	bool enable422;
+	/* Picture Width */
+	u16 pic_width;
+	/* Picture Height */
+	u16 pic_height;
+	/* Offset to bits/group used by RC to determine QP adjustment */
+	u8 rc_tgt_offset_high;
+	/* Offset to bits/group used by RC to determine QP adjustment */
+	u8 rc_tgt_offset_low;
+	/* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+	u16 bits_per_pixel;
+	/*
+	 * Factor to determine if an edge is present based
+	 * on the bits produced
+	 */
+	u8 rc_edge_factor;
+	/* Slow down incrementing once the range reaches this value */
+	u8 rc_quant_incr_limit1;
+	/* Slow down incrementing once the range reaches this value */
+	u8 rc_quant_incr_limit0;
+	/* Number of pixels to delay the initial transmission */
+	u16 initial_xmit_delay;
+	/* Number of pixels to delay the VLD on the decoder, not including SSM */
+	u16 initial_dec_delay;
+	/* Block prediction enable */
+	bool block_pred_enable;
+	/* Bits/group offset to use for first line of the slice */
+	u8 first_line_bpg_offset;
+	/* Value to use for RC model offset at slice start */
+	u16 initial_offset;
+	/* Thresholds defining each of the buffer ranges */
+	u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/* Parameters for each of the RC ranges */
+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+	/* Total size of RC model */
+	u16 rc_model_size;
+	/* Minimum QP where flatness information is sent */
+	u8 flatness_min_qp;
+	/* Maximum QP where flatness information is sent */
+	u8 flatness_max_qp;
+	/* Initial value for scale factor */
+	u8 initial_scale_value;
+	/* Decrement scale factor every scale_decrement_interval groups */
+	u16 scale_decrement_interval;
+	/* Increment scale factor every scale_increment_interval groups */
+	u16 scale_increment_interval;
+	/* Non-first line BPG offset to use */
+	u16 nfl_bpg_offset;
+	/* BPG offset used to enforce slice bit */
+	u16 slice_bpg_offset;
+	/* Final RC linear transformation offset value */
+	u16 final_offset;
+	/* Enable on-off VBR (i.e., disable stuffing bits) */
+	bool vbr_enable;
+	/* Mux word size (in bits) for SSM mode */
+	u8 mux_word_size;
+	/*
+	 * The (max) size in bytes of the "chunks" that are
+	 * used in slice multiplexing
+	 */
+	u16 slice_chunk_size;
+	/* Rate Control buffer size in bits */
+	u16 rc_bits;
+	/* DSC Minor Version */
+	u8 dsc_version_minor;
+	/* DSC Major version */
+	u8 dsc_version_major;
+	/* Native 4:2:2 support */
+	bool native_422;
+	/* Native 4:2:0 support */
+	bool native_420;
+	/* Additional bits/group for the second line of the slice for native 4:2:0 */
+	u8 second_line_bpg_offset;
+	/* Number of bits deallocated for each group not in the second line of the slice */
+	u16 nsl_bpg_offset;
+	/* Offset adjustment for the second line in Native 4:2:0 mode */
+	u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * PPS fields that span more than one byte are stored in big-endian format.
+ */
+struct drm_dsc_picture_parameter_set {
+	/**
+	 * @dsc_version:
+	 * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+	 * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+	 */
+	u8 dsc_version;
+	/**
+	 * @pps_identifier:
+	 * PPS1[7:0] - Application specific identifier that can be
+	 * used to differentiate between different PPS tables.
+	 */
+	u8 pps_identifier;
+	/**
+	 * @pps_reserved:
+	 * PPS2[7:0] - Reserved byte
+	 */
+	u8 pps_reserved;
+	/**
+	 * @pps_3:
+	 * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+	 * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+	 * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+	 * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2.)
+	 * PPS3[7:4] - bits_per_component: Bits per component for the original
+	 * pixels of the encoded picture.
+	 * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+	 * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+	 * allowed only when dsc_minor_version = 0x2)
+	 */
+	u8 pps_3;
+	/**
+	 * @pps_4:
+	 * PPS4[1:0] - These are the most significant 2 bits of
+	 * compressed BPP bits_per_pixel[9:0] syntax element.
+	 * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+	 * PPS4[3] - simple_422: Indicates if decoder drops samples to
+	 * reconstruct the 4:2:2 picture.
+	 * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+	 * active.
+	 * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+	 * groups in the picture.
+	 * PPS4[7:6] - Reserved bits
+	 */
+	u8 pps_4;
+	/**
+	 * @bits_per_pixel_low:
+	 * PPS5[7:0] - This indicates the lower significant 8 bits of
+	 * the compressed BPP bits_per_pixel[9:0] element.
+	 */
+	u8 bits_per_pixel_low;
+	/**
+	 * @pic_height:
+	 * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+	 * within the raster.
+	 */
+	__be16 pic_height;
+	/**
+	 * @pic_width:
+	 * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+	 * the raster.
+	 */
+	__be16 pic_width;
+	/**
+	 * @slice_height:
+	 * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+	 */
+	__be16 slice_height;
+	/**
+	 * @slice_width:
+	 * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+	 */
+	__be16 slice_width;
+	/**
+	 * @chunk_size:
+	 * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+	 * that are used for slice multiplexing.
+	 */
+	__be16 chunk_size;
+	/**
+	 * @initial_xmit_delay_high:
+	 * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+	 * It specifies the number of pixel times that the encoder waits before
+	 * transmitting data from its rate buffer.
+	 * PPS16[7:2] - Reserved
+	 */
+	u8 initial_xmit_delay_high;
+	/**
+	 * @initial_xmit_delay_low:
+	 * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+	 */
+	u8 initial_xmit_delay_low;
+	/**
+	 * @initial_dec_delay:
+	 *
+	 * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+	 * of pixel times that the decoder accumulates data in its rate buffer
+	 * before starting to decode and output pixels.
+	 */
+	__be16 initial_dec_delay;
+	/**
+	 * @pps20_reserved:
+	 *
+	 * PPS20[7:0] - Reserved
+	 */
+	u8 pps20_reserved;
+	/**
+	 * @initial_scale_value:
+	 * PPS21[5:0] - Initial rcXformScale factor used at beginning
+	 * of a slice.
+	 * PPS21[7:6] - Reserved
+	 */
+	u8 initial_scale_value;
+	/**
+	 * @scale_increment_interval:
+	 * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+	 * the rcXformScale factor at end of a slice.
+	 */
+	__be16 scale_increment_interval;
+	/**
+	 * @scale_decrement_interval_high:
+	 * PPS24[3:0] - Higher 4 bits indicating number of group times between
+	 * decrementing the rcXformScale factor at beginning of a slice.
+	 * PPS24[7:4] - Reserved
+	 */
+	u8 scale_decrement_interval_high;
+	/**
+	 * @scale_decrement_interval_low:
+	 * PPS25[7:0] - Lower 8 bits of scale decrement interval
+	 */
+	u8 scale_decrement_interval_low;
+	/**
+	 * @pps26_reserved:
+	 * PPS26[7:0]
+	 */
+	u8 pps26_reserved;
+	/**
+	 * @first_line_bpg_offset:
+	 * PPS27[4:0] - Number of additional bits that are allocated
+	 * for each group on first line of a slice.
+	 * PPS27[7:5] - Reserved
+	 */
+	u8 first_line_bpg_offset;
+	/**
+	 * @nfl_bpg_offset:
+	 * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+	 * deallocated for each group for groups after the first line of slice.
+	 */
+	__be16 nfl_bpg_offset;
+	/**
+	 * @slice_bpg_offset:
+	 * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+	 * group to enforce the slice constraint.
+	 */
+	__be16 slice_bpg_offset;
+	/**
+	 * @initial_offset:
+	 * PPS32,33[7:0] - Initial value for rcXformOffset
+	 */
+	__be16 initial_offset;
+	/**
+	 * @final_offset:
+	 * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+	 */
+	__be16 final_offset;
+	/**
+	 * @flatness_min_qp:
+	 * PPS36[4:0] - Minimum QP at which flatness is signaled and
+	 * flatness QP adjustment is made.
+	 * PPS36[7:5] - Reserved
+	 */
+	u8 flatness_min_qp;
+	/**
+	 * @flatness_max_qp:
+	 * PPS37[4:0] - Max QP at which flatness is signaled and
+	 * the flatness adjustment is made.
+	 * PPS37[7:5] - Reserved
+	 */
+	u8 flatness_max_qp;
+	/**
+	 * @rc_model_size:
+	 * PPS38,39[7:0] - Number of bits within RC Model.
+	 */
+	__be16 rc_model_size;
+	/**
+	 * @rc_edge_factor:
+	 * PPS40[3:0] - Ratio of current activity vs. previous
+	 * activity to determine presence of edge.
+	 * PPS40[7:4] - Reserved
+	 */
+	u8 rc_edge_factor;
+	/**
+	 * @rc_quant_incr_limit0:
+	 * PPS41[4:0] - QP threshold used in short term RC
+	 * PPS41[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit0;
+	/**
+	 * @rc_quant_incr_limit1:
+	 * PPS42[4:0] - QP threshold used in short term RC
+	 * PPS42[7:5] - Reserved
+	 */
+	u8 rc_quant_incr_limit1;
+	/**
+	 * @rc_tgt_offset:
+	 * PPS43[3:0] - Lower end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 * PPS43[7:4] - Upper end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+	 */
+	u8 rc_tgt_offset;
+	/**
+	 * @rc_buf_thresh:
+	 * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+	 * the 15 ranges defined by 14 thresholds.
+	 */
+	u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+	/**
+	 * @rc_range_parameters:
+	 * PPS58[7:0] - PPS87[7:0]
+	 * Parameters that correspond to each of the 15 ranges.
+	 */
+	__be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+	/**
+	 * @native_422_420:
+	 * PPS88[0] - 0 = Native 4:2:2 not used
+	 * 1 = Native 4:2:2 used
+	 * PPS88[1] - 0 = Native 4:2:0 not used
+	 * 1 = Native 4:2:0 used
+	 * PPS88[7:2] - Reserved 6 bits
+	 */
+	u8 native_422_420;
+	/**
+	 * @second_line_bpg_offset:
+	 * PPS89[4:0] - Additional bits/group budget for the
+	 * second line of a slice in Native 4:2:0 mode.
+	 * Set to 0 if DSC minor version is 1 or native420 is 0.
+	 * PPS89[7:5] - Reserved
+	 */
+	u8 second_line_bpg_offset;
+	/**
+	 * @nsl_bpg_offset:
+	 * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+	 * for each group that is not in the second line of a slice.
+	 */
+	__be16 nsl_bpg_offset;
+	/**
+	 * @second_line_offset_adj:
+	 * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+	 * line in Native 4:2:0 mode.
+	 */
+	__be16 second_line_offset_adj;
+	/**
+	 * @pps_long_94_reserved:
+	 * PPS 94, 95, 96, 97 - Reserved
+	 */
+	u32 pps_long_94_reserved;
+	/**
+	 * @pps_long_98_reserved:
+	 * PPS 98, 99, 100, 101 - Reserved
+	 */
+	u32 pps_long_98_reserved;
+	/**
+	 * @pps_long_102_reserved:
+	 * PPS 102, 103, 104, 105 - Reserved
+	 */
+	u32 pps_long_102_reserved;
+	/**
+	 * @pps_long_106_reserved:
+	 * PPS 106, 107, 108, 109 - reserved
+	 */
+	u32 pps_long_106_reserved;
+	/**
+	 * @pps_long_110_reserved:
+	 * PPS 110, 111, 112, 113 - reserved
+	 */
+	u32 pps_long_110_reserved;
+	/**
+	 * @pps_long_114_reserved:
+	 * PPS 114 - 117 - reserved
+	 */
+	u32 pps_long_114_reserved;
+	/**
+	 * @pps_long_118_reserved:
+	 * PPS 118 - 121 - reserved
+	 */
+	u32 pps_long_118_reserved;
+	/**
+	 * @pps_long_122_reserved:
+	 * PPS 122- 125 - reserved
+	 */
+	u32 pps_long_122_reserved;
+	/**
+	 * @pps_short_126_reserved:
+	 * PPS 126, 127 - reserved
+	 */
+	__be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in drm_dp_helper.h and the PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+	struct dp_sdp_header pps_header;
+	struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+				const struct drm_dsc_config *dsc_cfg);
+
+#endif /* DRM_DSC_H_ */
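
The intended flow: the driver computes a &drm_dsc_config for the mode and link, then lets these helpers build the big-endian PPS SDP. A sketch (foo_write_dp_sdp() stands in for the hardware-specific SDP transmit):

	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);

	/* Send as a DP secondary data packet before enabling DSC. */
	foo_write_dp_sdp(encoder, &pps_sdp, sizeof(pps_sdp));
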
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a65f0d..8dbbe1e 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,8 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
-					bool state);
 
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 	unsigned int plane);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 26485ac..84ac792 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -164,14 +164,14 @@ struct drm_file {
 	 * See also the :ref:`section on primary nodes and authentication
 	 * <drm_primary_node>`.
 	 */
-	unsigned authenticated :1;
+	bool authenticated;
 
 	/**
 	 * @stereo_allowed:
 	 *
 	 * True when the client has asked us to expose stereo 3D mode flags.
 	 */
-	unsigned stereo_allowed :1;
+	bool stereo_allowed;
 
 	/**
 	 * @universal_planes:
@@ -179,10 +179,10 @@ struct drm_file {
 	 * True if client understands CRTC primary planes and cursor planes
 	 * in the plane list. Automatically set when @atomic is set.
 	 */
-	unsigned universal_planes:1;
+	bool universal_planes;
 
 	/** @atomic: True if client understands atomic properties. */
-	unsigned atomic:1;
+	bool atomic;
 
 	/**
 	 * @aspect_ratio_allowed:
@@ -190,14 +190,14 @@ struct drm_file {
 	 * True, if client can handle picture aspect ratios, and has requested
 	 * to pass this information along with the mode.
 	 */
-	unsigned aspect_ratio_allowed:1;
+	bool aspect_ratio_allowed;
 
 	/**
 	 * @writeback_connectors:
 	 *
 	 * True if client understands writeback connectors
 	 */
-	unsigned writeback_connectors:1;
+	bool writeback_connectors;
 
 	/**
 	 * @is_master:
@@ -208,7 +208,7 @@ struct drm_file {
 	 * See also the :ref:`section on primary nodes and authentication
 	 * <drm_primary_node>`.
 	 */
-	unsigned is_master:1;
+	bool is_master;
 
 	/**
 	 * @master:
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 865ef60..bcb389f 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -52,25 +52,86 @@ struct drm_mode_fb_cmd2;
 
 /**
  * struct drm_format_info - information about a DRM format
- * @format: 4CC format identifier (DRM_FORMAT_*)
- * @depth: Color depth (number of bits per pixel excluding padding bits),
- *	valid for a subset of RGB formats only. This is a legacy field, do not
- *	use in new code and set to 0 for new formats.
- * @num_planes: Number of color planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @has_alpha: Does the format embeds an alpha component?
- * @is_yuv: Is it a YUV format?
  */
 struct drm_format_info {
+	/** @format: 4CC format identifier (DRM_FORMAT_*) */
 	u32 format;
+
+	/**
+	 * @depth:
+	 *
+	 * Color depth (number of bits per pixel excluding padding bits),
+	 * valid for a subset of RGB formats only. This is a legacy field, do
+	 * not use in new code and set to 0 for new formats.
+	 */
 	u8 depth;
+
+	/** @num_planes: Number of color planes (1 to 3) */
 	u8 num_planes;
-	u8 cpp[3];
+
+	union {
+		/**
+		 * @cpp:
+		 *
+		 * Number of bytes per pixel (per plane), this is aliased with
+		 * @char_per_block. It is deprecated in favour of using the
+		 * triplet @char_per_block, @block_w, @block_h for better
+		 * describing the pixel format.
+		 */
+		u8 cpp[3];
+
+		/**
+		 * @char_per_block:
+		 *
+		 * Number of bytes per block (per plane), where blocks are
+		 * defined as a rectangle of pixels which are stored next to
+		 * each other in a byte aligned memory region. Together with
+		 * @block_w and @block_h this is used to properly describe tiles
+		 * in tiled formats or to describe groups of pixels in packed
+		 * formats for which the memory needed for a single pixel is not
+		 * byte aligned.
+		 *
+		 * @cpp has been kept for historical reasons because there are
+		 * a lot of places in drivers where it's used. In drm core for
+		 * generic code paths the preferred way is to use
+		 * @char_per_block, drm_format_info_block_width() and
+		 * drm_format_info_block_height() which allows handling both
+		 * block and non-block formats in the same way.
+		 *
+		 * For formats that are intended to be used only with non-linear
+		 * modifiers both @cpp and @char_per_block must be 0 in the
+		 * generic format table. Drivers could supply accurate
+		 * information from their drm_mode_config.get_format_info hook
+		 * if they want the core to validate the pitch.
+		 */
+		u8 char_per_block[3];
+	};
+
+	/**
+	 * @block_w:
+	 *
+	 * Block width in pixels, this is intended to be accessed through
+	 * drm_format_info_block_width()
+	 */
+	u8 block_w[3];
+
+	/**
+	 * @block_h:
+	 *
+	 * Block height in pixels, this is intended to be accessed through
+	 * drm_format_info_block_height()
+	 */
+	u8 block_h[3];
+
+	/** @hsub: Horizontal chroma subsampling factor */
 	u8 hsub;
+	/** @vsub: Vertical chroma subsampling factor */
 	u8 vsub;
+
+	/** @has_alpha: Does the format embed an alpha component? */
 	bool has_alpha;
+
+	/** @is_yuv: Is it a YUV format? */
 	bool is_yuv;
 };
 
@@ -96,6 +157,12 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
 int drm_format_vert_chroma_subsampling(uint32_t format);
 int drm_format_plane_width(int width, uint32_t format, int plane);
 int drm_format_plane_height(int height, uint32_t format, int plane);
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+					 int plane);
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+					  int plane);
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+				   int plane, unsigned int buffer_width);
 const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
 
 #endif /* __DRM_FOURCC_H__ */
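
For linear formats a block is a single pixel and the old cpp math is unchanged; for block formats the minimum pitch must be computed per block. Conceptually, for plane 0 (the real drm_format_info_min_pitch() also validates its arguments):

	const struct drm_format_info *info = drm_format_info(format);
	u64 min_pitch;

	min_pitch = DIV_ROUND_UP_ULL((u64)width * info->char_per_block[0],
				     drm_format_info_block_width(info, 0));
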
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c50502c..c94aced 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
 }
 
 /**
- * drm_framebuffer_reference - acquire a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_get() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
-	drm_framebuffer_get(fb);
-}
-
-/**
- * drm_framebuffer_unreference - release a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_put() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	drm_framebuffer_put(fb);
-}
-
-/**
  * drm_framebuffer_read_refcount - read the framebuffer reference count.
  * @fb: framebuffer
  *
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 3583b98..c957274 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -38,6 +38,121 @@
 
 #include <drm/drm_vma_manager.h>
 
+struct drm_gem_object;
+
+/**
+ * struct drm_gem_object_funcs - GEM object functions
+ */
+struct drm_gem_object_funcs {
+	/**
+	 * @free:
+	 *
+	 * Deconstructor for drm_gem_objects.
+	 *
+	 * This callback is mandatory.
+	 */
+	void (*free)(struct drm_gem_object *obj);
+
+	/**
+	 * @open:
+	 *
+	 * Called upon GEM handle creation.
+	 *
+	 * This callback is optional.
+	 */
+	int (*open)(struct drm_gem_object *obj, struct drm_file *file);
+
+	/**
+	 * @close:
+	 *
+	 * Called upon GEM handle release.
+	 *
+	 * This callback is optional.
+	 */
+	void (*close)(struct drm_gem_object *obj, struct drm_file *file);
+
+	/**
+	 * @print_info:
+	 *
+	 * If driver subclasses struct &drm_gem_object, it can implement this
+	 * optional hook for printing additional driver specific info.
+	 *
+	 * drm_printf_indent() should be used in the callback passing it the
+	 * indent argument.
+	 *
+	 * This callback is called from drm_gem_print_info().
+	 *
+	 * This callback is optional.
+	 */
+	void (*print_info)(struct drm_printer *p, unsigned int indent,
+			   const struct drm_gem_object *obj);
+
+	/**
+	 * @export:
+	 *
+	 * Export backing buffer as a &dma_buf.
+	 * If this is not set drm_gem_prime_export() is used.
+	 *
+	 * This callback is optional.
+	 */
+	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
+
+	/**
+	 * @pin:
+	 *
+	 * Pin backing buffer in memory.
+	 *
+	 * This callback is optional.
+	 */
+	int (*pin)(struct drm_gem_object *obj);
+
+	/**
+	 * @unpin:
+	 *
+	 * Unpin backing buffer.
+	 *
+	 * This callback is optional.
+	 */
+	void (*unpin)(struct drm_gem_object *obj);
+
+	/**
+	 * @get_sg_table:
+	 *
+	 * Returns a Scatter-Gather table representation of the buffer.
+	 * Used when exporting a buffer.
+	 *
+	 * This callback is mandatory if buffer export is supported.
+	 */
+	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
+
+	/**
+	 * @vmap:
+	 *
+	 * Returns a virtual address for the buffer.
+	 *
+	 * This callback is optional.
+	 */
+	void *(*vmap)(struct drm_gem_object *obj);
+
+	/**
+	 * @vunmap:
+	 *
+	 * Releases the address previously returned by @vmap.
+	 *
+	 * This callback is optional.
+	 */
+	void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+	/**
+	 * @vm_ops:
+	 *
+	 * Virtual memory operations used with mmap.
+	 *
+	 * This is optional but necessary for mmap support.
+	 */
+	const struct vm_operations_struct *vm_ops;
+};
+
 /**
  * struct drm_gem_object - GEM buffer object
  *
@@ -146,6 +261,17 @@ struct drm_gem_object {
 	 * simply leave it as NULL.
 	 */
 	struct dma_buf_attachment *import_attach;
+
+	/**
+	 * @funcs:
+	 *
+	 * Optional GEM object functions. If this is set, it will be used
+	 * instead of the corresponding &drm_driver GEM callbacks.
+	 *
+	 * New drivers should use this.
+	 */
+	const struct drm_gem_object_funcs *funcs;
 };
 
 /**
@@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj)
 void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
 void drm_gem_object_put(struct drm_gem_object *obj);
 
-/**
- * drm_gem_object_reference - acquire a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_get() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_reference(struct drm_gem_object *obj)
-{
-	drm_gem_object_get(obj);
-}
-
-/**
- * __drm_gem_object_unreference - raw function to release a GEM buffer object
- *                                reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for __drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
-{
-	__drm_gem_object_put(obj);
-}
-
-/**
- * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put_unlocked() and should
- * not be used by new code.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
-	drm_gem_object_put_unlocked(obj);
-}
-
-/**
- * drm_gem_object_unreference - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
-{
-	drm_gem_object_put(obj);
-}
-
 int drm_gem_handle_create(struct drm_file *file_priv,
 			  struct drm_gem_object *obj,
 			  u32 *handlep);
@@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
 			 struct drm_device *dev,
 			 uint32_t handle);
 
+int drm_gem_pin(struct drm_gem_object *obj);
+void drm_gem_unpin(struct drm_gem_object *obj);
+void *drm_gem_vmap(struct drm_gem_object *obj);
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
 #endif /* __DRM_GEM_H__ */
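
A driver opts in per object by pointing @funcs at a static table right after allocation; the new drm_gem_pin()/drm_gem_vmap() wrappers above dispatch through it. A sketch (foo_* hooks hypothetical):

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free	= foo_gem_free_object,
	.vmap	= foo_gem_vmap,
	.vunmap	= foo_gem_vunmap,
};

static struct drm_gem_object *foo_gem_create(struct drm_device *dev,
					     size_t size)
{
	struct drm_gem_object *obj = foo_gem_alloc(dev, size);

	if (!IS_ERR(obj))
		obj->funcs = &foo_gem_funcs;

	return obj;
}
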
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 1977714..07c5049 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
 void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
 void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ *                               address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+	.gem_create_object	= drm_cma_gem_create_object_default_funcs, \
+	.dumb_create		= drm_gem_cma_dumb_create, \
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+	.gem_prime_mmap		= drm_gem_prime_mmap
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+				       struct dma_buf_attachment *attach,
+				       struct sg_table *sgt);
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
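
With that, a simple CMA driver's &drm_driver shrinks to something like the following (fops, desc and version fields omitted; "foo" illustrative):

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name = "foo",
};
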
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a83060..0000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
-	DRM_GLOBAL_TTM_MEM = 0,
-	DRM_GLOBAL_TTM_BO,
-	DRM_GLOBAL_TTM_OBJECT,
-	DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
-	enum drm_global_types global_type;
-	size_t size;
-	void *object;
-	int (*init) (struct drm_global_reference *);
-	void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d8..a6de09c 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
 #define DRM_HDCP_DDC_BSTATUS			0x41
 #define DRM_HDCP_DDC_KSV_FIFO			0x43
 
+#define DRM_HDCP_1_4_SRM_ID			0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE		3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE		40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0			0x00
+#define HDCP_STREAM_TYPE1			0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG			1
+#define HDCP_2_2_AKE_INIT			2
+#define HDCP_2_2_AKE_SEND_CERT			3
+#define HDCP_2_2_AKE_NO_STORED_KM		4
+#define HDCP_2_2_AKE_STORED_KM			5
+#define HDCP_2_2_AKE_SEND_HPRIME		7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO		8
+#define HDCP_2_2_LC_INIT			9
+#define HDCP_2_2_LC_SEND_LPRIME			10
+#define HDCP_2_2_SKE_SEND_EKS			11
+#define HDCP_2_2_REP_SEND_RECVID_LIST		12
+#define HDCP_2_2_REP_SEND_ACK			15
+#define HDCP_2_2_REP_STREAM_MANAGE		16
+#define HDCP_2_2_REP_STREAM_READY		17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE		50
+
+#define HDCP_2_2_RTX_LEN			8
+#define HDCP_2_2_RRX_LEN			8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN		128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN		3
+#define HDCP_2_2_K_PUB_RX_LEN			(HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+						 HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN		384
+
+#define HDCP_2_2_E_KPUB_KM_LEN			128
+#define HDCP_2_2_E_KH_KM_M_LEN			(16 + 16)
+#define HDCP_2_2_H_PRIME_LEN			32
+#define HDCP_2_2_E_KH_KM_LEN			16
+#define HDCP_2_2_RN_LEN				8
+#define HDCP_2_2_L_PRIME_LEN			32
+#define HDCP_2_2_E_DKEY_KS_LEN			16
+#define HDCP_2_2_RIV_LEN			8
+#define HDCP_2_2_SEQ_NUM_LEN			3
+#define HDCP_2_2_V_PRIME_HALF_LEN		(HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN		DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT		31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN		(HDCP_2_2_RECEIVER_ID_LEN * \
+						 HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN			32
+
+/* Following Macros take a byte at a time for bit(s) masking */
+/*
+ * TODO: This has to be changed for DP MST, as multiple stream on
+ * same port is possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT	1
+#define HDCP_2_2_TXCAP_MASK_LEN			2
+#define HDCP_2_2_RXCAPS_LEN			3
+#define HDCP_2_2_RX_REPEATER(x)			((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x)		((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN			2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x)	((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x)	((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x)	((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x)		((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x)		(((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x)		((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x)			(((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+	u8	receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+	u8	kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+	u8	reserved[2];
+	u8	dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+	u8	stream_id;
+	u8	stream_type;
+} __packed;
+
+/*
+ * The TxCaps field specified in the HDCP HDMI, DP specs
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+	/* Transmitter must set this to 0x2 */
+	u8	version;
+
+	/* Reserved for HDCP and DP Spec. Read as Zero */
+	u8	tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+	u8			msg_id;
+	u8			r_tx[HDCP_2_2_RTX_LEN];
+	struct hdcp2_tx_caps	tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+	u8			msg_id;
+	struct hdcp2_cert_rx	cert_rx;
+	u8			r_rx[HDCP_2_2_RRX_LEN];
+	u8			rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+	u8	msg_id;
+	u8	e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+	u8	msg_id;
+	u8	e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+	u8	msg_id;
+	u8	h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+	u8	msg_id;
+	u8	e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+	u8	msg_id;
+	u8	r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+	u8	msg_id;
+	u8	l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+	u8	msg_id;
+	u8	e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+	u8	riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+	u8	msg_id;
+	u8	rx_info[HDCP_2_2_RXINFO_LEN];
+	u8	seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+	u8	v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+	u8	receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+	u8	msg_id;
+	u8	v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+	u8			msg_id;
+	u8			seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+	__be16			k;
+	struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+	u8	msg_id;
+	u8	m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+	u8	msg_id;
+	u8	stream_type;
+} __packed;
+
+/* HDCP2.2 TIMEOUTs in mSec */
+#define HDCP_2_2_CERT_TIMEOUT_MS		100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS	1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS	200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS		200
+#define	HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS		20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS		7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS		3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS	100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET		0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET		0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET	0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET		0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET		0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK		BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL		0x02
+#define HDCP_2_2_SEQ_NUM_MAX			0xFFFFFF
+#define	HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN	200
+
+/* Below macros take a byte at a time and mask the bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN		2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x)	((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x)		((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x)	((x) & BIT(3))
+
 #endif
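
On HDMI the receiver's status sits behind DDC offset 0x70; reassembling the 10-bit message size looks like this fragment (foo_ddc_read() is a placeholder for the driver's I2C access):

	u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
	int msg_size;

	foo_ddc_read(connector, HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
		     rx_status, sizeof(rx_status));

	/* Byte 0 holds bits [7:0], byte 1 bits [9:8] of the size. */
	msg_size = rx_status[0] |
		   (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8);
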
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef190..491528f 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,12 @@ struct mipi_dsi_device_info {
  * @format: pixel format for video mode
  * @lanes: number of active data lanes
  * @mode_flags: DSI operation mode related flags
+ * @hs_rate: maximum lane frequency for high speed mode in hertz; this should
+ * be set to the real limit of the hardware, and zero is only accepted for
+ * legacy drivers
+ * @lp_rate: maximum lane frequency for low power mode in hertz; this should
+ * be set to the real limit of the hardware, and zero is only accepted for
+ * legacy drivers
  */
 struct mipi_dsi_device {
 	struct mipi_dsi_host *host;
@@ -178,6 +184,8 @@ struct mipi_dsi_device {
 	unsigned int lanes;
 	enum mipi_dsi_pixel_format format;
 	unsigned long mode_flags;
+	unsigned long hs_rate;
+	unsigned long lp_rate;
 };
 
 #define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
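
Panel drivers fill these in at probe so the DSI host can clamp its clocks; a sketch where the rates are purely illustrative:

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->hs_rate = 349440000;	/* 349.44 MHz, hardware limit */
	dsi->lp_rate = 9600000;		/* 9.6 MHz */

	return mipi_dsi_attach(dsi);
}
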
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 928e417..572274c 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -52,6 +52,12 @@ struct drm_mode_config_funcs {
 	 * requested metadata, but most of that is left to the driver. See
 	 * &struct drm_mode_fb_cmd2 for details.
 	 *
+	 * To validate the pixel format and modifier drivers can use
+	 * drm_any_plane_has_format() to make sure at least one plane supports
+	 * the requested values. Note that the driver must first determine the
+	 * actual modifier used if the request doesn't have it specified,
+	 * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
+	 *
 	 * If the parameters are deemed valid and the backing storage objects in
 	 * the underlying memory manager all exist, then the driver allocates
 	 * a new &drm_framebuffer structure, subclassed to contain
@@ -628,6 +634,15 @@ struct drm_mode_config {
 	 */
 	struct drm_property *prop_crtc_id;
 	/**
+	 * @prop_fb_damage_clips: Optional plane property to mark damaged
+	 * regions on the plane in framebuffer coordinates of the framebuffer
+	 * attached to the plane.
+	 *
+	 * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+	 * plane src coordinates, damage clips are not in 16.16 fixed point.
+	 */
+	struct drm_property *prop_fb_damage_clips;
+	/**
 	 * @prop_active: Default atomic CRTC property to control the active
 	 * state, which is the simplified implementation for DPMS in atomic
 	 * drivers.
@@ -639,6 +654,11 @@ struct drm_mode_config {
 	 * connectors must be of and active must be set to disabled, too.
 	 */
 	struct drm_property *prop_mode_id;
+	/**
+	 * @prop_vrr_enabled: Default atomic CRTC property to indicate
+	 * whether variable refresh rate should be enabled on the CRTC.
+	 */
+	struct drm_property *prop_vrr_enabled;
 
 	/**
 	 * @dvi_i_subconnector_property: Optional DVI-I property to
@@ -809,6 +829,13 @@ struct drm_mode_config {
 
 	/* dumb ioctl parameters */
 	uint32_t preferred_depth, prefer_shadow;
+
+	/**
+	 * @quirk_addfb_prefer_xbgr_30bpp:
+	 *
+	 * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
+	 * only ever be set by the nouveau kernel driver.
+	 */
 	bool quirk_addfb_prefer_xbgr_30bpp;
 
 	/**
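
A sketch of the @fb_create validation pattern described above; the driver
name is hypothetical, and falling back to a linear modifier when none is
specified stands in for the driver's own modifier deduction:

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		u64 modifier = (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) ?
			       mode_cmd->modifier[0] : DRM_FORMAT_MOD_LINEAR;

		if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
					      modifier))
			return ERR_PTR(-EINVAL);

		return foo_alloc_fb(dev, file, mode_cmd);	/* hypothetical */
	}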
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index a685d1b..a308f2d6 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
 int drm_modeset_lock_all_ctx(struct drm_device *dev,
 			     struct drm_modeset_acquire_ctx *ctx);
 
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This reduces boilerplate while also ensuring that return values
+ * are properly checked where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary.
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret)		\
+	drm_modeset_acquire_init(&ctx, flags);				\
+modeset_lock_retry:							\
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);			\
+	if (ret)							\
+		goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret)				\
+modeset_lock_fail:							\
+	if (ret == -EDEADLK) {						\
+		ret = drm_modeset_backoff(&ctx);			\
+		if (!ret)						\
+			goto modeset_lock_retry;			\
+	}								\
+	drm_modeset_drop_locks(&ctx);					\
+	drm_modeset_acquire_fini(&ctx);
+
 #endif /* DRM_MODESET_LOCK_H_ */
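
A minimal usage sketch of the two helpers, assuming dev is the driver's
&drm_device:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
	/* all modeset locks are held here, and ret is 0 */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	/* ret now carries any acquire or backoff failure, e.g. -EINTR */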
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0a0834b..6078c70 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -173,6 +173,16 @@ struct drm_plane_state {
 	 */
 	enum drm_color_range color_range;
 
+	/**
+	 * @fb_damage_clips:
+	 *
+	 * Blob representing damage (area in plane framebuffer that changed
+	 * since last plane update) as an array of &drm_mode_rect in framebuffer
+	 * coordinates of the attached framebuffer. Note that unlike plane src,
+	 * damage clips are not in 16.16 fixed point.
+	 */
+	struct drm_property_blob *fb_damage_clips;
+
 	/** @src: clipped source coordinates of the plane (in 16.16) */
 	/** @dst: clipped destination coordinates of the plane */
 	struct drm_rect src, dst;
@@ -798,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
 #define drm_for_each_plane(plane, dev) \
 	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
 
+bool drm_any_plane_has_format(struct drm_device *dev,
+			      u32 format, u64 modifier);
+/**
+ * drm_plane_get_damage_clips_count - Returns damage clips count.
+ * @state: Plane state.
+ *
+ * Simple helper to get the number of &drm_mode_rect clips set by user-space
+ * during plane update.
+ *
+ * Return: Number of clips in plane fb_damage_clips blob property.
+ */
+static inline unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
+{
+	return (state && state->fb_damage_clips) ?
+		state->fb_damage_clips->length / sizeof(struct drm_mode_rect) : 0;
+}
+
+/**
+ * drm_plane_get_damage_clips - Returns damage clips.
+ * @state: Plane state.
+ *
+ * Note that this function returns uapi type &drm_mode_rect. Drivers might
+ * instead be interested in internal &drm_rect which can be obtained by calling
+ * drm_helper_get_plane_damage_clips().
+ *
+ * Return: Damage clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state)
+{
+	return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
+					state->fb_damage_clips->data : NULL);
+}
 
 #endif
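
A sketch of how a plane update hook might walk the damage blob with the two
helpers above; the flush function is hypothetical:

	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	unsigned int i, num_clips = drm_plane_get_damage_clips_count(state);

	for (i = 0; i < num_clips; i++)
		foo_flush_rect(fb, clips[i].x1, clips[i].y1,	/* hypothetical */
			       clips[i].x2, clips[i].y2);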
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 26cee29..331ebd6 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,42 +38,7 @@
  */
 #define DRM_PLANE_HELPER_NO_SCALING (1<<16)
 
-int drm_plane_helper_check_update(struct drm_plane *plane,
-				  struct drm_crtc *crtc,
-				  struct drm_framebuffer *fb,
-				  struct drm_rect *src,
-				  struct drm_rect *dest,
-				  unsigned int rotation,
-				  int min_scale,
-				  int max_scale,
-				  bool can_position,
-				  bool can_update_disabled,
-				  bool *visible);
-int drm_primary_helper_update(struct drm_plane *plane,
-			      struct drm_crtc *crtc,
-			      struct drm_framebuffer *fb,
-			      int crtc_x, int crtc_y,
-			      unsigned int crtc_w, unsigned int crtc_h,
-			      uint32_t src_x, uint32_t src_y,
-			      uint32_t src_w, uint32_t src_h,
-			      struct drm_modeset_acquire_ctx *ctx);
-int drm_primary_helper_disable(struct drm_plane *plane,
-			       struct drm_modeset_acquire_ctx *ctx);
 void drm_primary_helper_destroy(struct drm_plane *plane);
 extern const struct drm_plane_funcs drm_primary_helper_funcs;
 
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
-			    struct drm_framebuffer *fb,
-			    int crtc_x, int crtc_y,
-			    unsigned int crtc_w, unsigned int crtc_h,
-			    uint32_t src_x, uint32_t src_y,
-			    uint32_t src_w, uint32_t src_h,
-			    struct drm_modeset_acquire_ctx *ctx);
-int drm_plane_helper_disable(struct drm_plane *plane,
-			     struct drm_modeset_acquire_ctx *ctx);
-
-/* For use by drm_crtc_helper.c */
-int drm_plane_helper_commit(struct drm_plane *plane,
-			    struct drm_plane_state *plane_state,
-			    struct drm_framebuffer *old_fb);
 #endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d716d65..b03731a 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 			       struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 			       int *prime_fd);
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 					    struct dma_buf *dma_buf);
 
@@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 			   enum dma_data_direction dir);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-			   void *addr);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 5b9efff..4a0a80d 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -153,7 +153,8 @@ struct drm_property {
 	 *     userspace. The kernel is allowed to update the value of these
 	 *     properties. This is generally used to expose probe state to
 	 *     userspace, e.g. the EDID, or the connector path property on DP
-	 *     MST sinks.
+	 *     MST sinks. The kernel can update the value of an immutable
+	 *     property by calling drm_object_property_set_value().
 	 */
 	uint32_t flags;
 
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 425432b..b1fe921 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
 
 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 				     u32 handle);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence);
 int drm_syncobj_find_fence(struct drm_file *file_private,
-			   u32 handle, u64 point,
+			   u32 handle, u64 point, u64 flags,
 			   struct dma_fence **fence);
 void drm_syncobj_free(struct kref *kref);
 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a960..6ad9630 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -95,7 +95,7 @@ struct drm_vblank_crtc {
 	/**
 	 * @queue: Wait queue for vblank waiters.
 	 */
-	wait_queue_head_t queue;	/**< VBLANK wait queue */
+	wait_queue_head_t queue;
 	/**
 	 * @disable_timer: Disable timer for the delayed vblank disabling
 	 * hysteresis logic. Vblank disabling is controlled through the
@@ -107,7 +107,7 @@ struct drm_vblank_crtc {
 	/**
 	 * @seqlock: Protect vblank count and time.
 	 */
-	seqlock_t seqlock;		/* protects vblank count and time */
+	seqlock_t seqlock;
 
 	/**
 	 * @count: Current software vblank counter.
@@ -123,7 +123,7 @@ struct drm_vblank_crtc {
 	 * this refcount reaches 0 can the hardware interrupt be disabled using
 	 * @disable_timer.
 	 */
-	atomic_t refcount;		/* number of users of vblank interruptsper crtc */
+	atomic_t refcount;
 	/**
 	 * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
 	 */
@@ -136,7 +136,7 @@ struct drm_vblank_crtc {
 	 * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
 	 * save and restore the vblank count.
 	 */
-	unsigned int inmodeset;		/* Display driver is setting mode */
+	unsigned int inmodeset;
 	/**
 	 * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
 	 * structure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268..47e1979 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
  * @hang_limit: once the hangs caused by a job cross this limit, the job is
  *              marked guilty and is no longer considered for scheduling.
  * @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks if the underlying HW is ready to work
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
 	spinlock_t			job_list_lock;
 	int				hang_limit;
 	atomic_t                        num_jobs;
+	bool				ready;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
 		   const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_job *job);
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
 void drm_sched_job_kickout(struct drm_sched_job *s_job);
 
 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
@@ -326,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+			      unsigned long remaining);
+
 #endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fd965ff..1926671 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
 	INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 /* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x591C, info),  /* ULX GT2 */ \
 	INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
 
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+	INTEL_VGA_DEVICE(0x87CA, info)
+
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
 	INTEL_KBL_GT2_IDS(info), \
 	INTEL_KBL_GT3_IDS(info), \
 	INTEL_KBL_GT4_IDS(info), \
-	INTEL_AML_GT2_IDS(info)
+	INTEL_AML_KBL_GT2_IDS(info)
 
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
@@ -407,17 +411,17 @@
 
 /* WHL/CFL U GT1 */
 #define INTEL_WHL_U_GT1_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA1, info)
+	INTEL_VGA_DEVICE(0x3EA1, info), \
+	INTEL_VGA_DEVICE(0x3EA4, info)
 
 /* WHL/CFL U GT2 */
 #define INTEL_WHL_U_GT2_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA0, info)
+	INTEL_VGA_DEVICE(0x3EA0, info), \
+	INTEL_VGA_DEVICE(0x3EA3, info)
 
 /* WHL/CFL U GT3 */
 #define INTEL_WHL_U_GT3_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA2, info), \
-	INTEL_VGA_DEVICE(0x3EA3, info), \
-	INTEL_VGA_DEVICE(0x3EA4, info)
+	INTEL_VGA_DEVICE(0x3EA2, info)
 
 #define INTEL_CFL_IDS(info)	   \
 	INTEL_CFL_S_GT1_IDS(info), \
@@ -427,7 +431,8 @@
 	INTEL_CFL_U_GT3_IDS(info), \
 	INTEL_WHL_U_GT1_IDS(info), \
 	INTEL_WHL_U_GT2_IDS(info), \
-	INTEL_WHL_U_GT3_IDS(info)
+	INTEL_WHL_U_GT3_IDS(info), \
+	INTEL_AML_CFL_GT2_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index fe9827d..448aa5e 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -10,10 +10,15 @@
 #ifndef __LINUX_TINYDRM_H
 #define __LINUX_TINYDRM_H
 
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <linux/mutex.h>
 #include <drm/drm_simple_kms_helper.h>
 
+struct drm_clip_rect;
+struct drm_driver;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+
 /**
  * struct tinydrm_device - tinydrm device
  */
@@ -54,27 +59,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
 }
 
 /**
- * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
- *
- * This macro provides a shortcut for setting the tinydrm GEM operations in
- * the &drm_driver structure.
- */
-#define TINYDRM_GEM_DRIVER_OPS \
-	.gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
-	.gem_print_info		= drm_gem_cma_print_info, \
-	.gem_vm_ops		= &drm_gem_cma_vm_ops, \
-	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
-	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
-	.gem_prime_import	= drm_gem_prime_import, \
-	.gem_prime_export	= drm_gem_prime_export, \
-	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table, \
-	.gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
-	.gem_prime_vmap		= drm_gem_cma_prime_vmap, \
-	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap, \
-	.gem_prime_mmap		= drm_gem_cma_prime_mmap, \
-	.dumb_create		= drm_gem_cma_dumb_create
-
-/**
  * TINYDRM_MODE - tinydrm display mode
  * @hd: Horizontal resolution, width
  * @vd: Vertical resolution, height
@@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
 	.type = DRM_MODE_TYPE_DRIVER, \
 	.clock = 1 /* pass validation */
 
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
-				      struct dma_buf_attachment *attach,
-				      struct sg_table *sgt);
 int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
 		      const struct drm_framebuffer_funcs *fb_funcs,
 		      struct drm_driver *driver);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e4fee8e..1021106 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
 #define _TTM_BO_DRIVER_H_
 
 #include <drm/drm_mm.h>
-#include <drm/drm_global.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
 };
 
 /**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
-	struct drm_global_reference ref;
-	struct ttm_mem_global *mem_glob;
-};
-
-/**
  * struct ttm_bo_global - Buffer object driver global data.
  *
  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
  * @swap_lru: Lru list of buffer objects used for swapping.
  */
 
-struct ttm_bo_global {
+extern struct ttm_bo_global {
 
 	/**
 	 * Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
 	struct kobject kobj;
 	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
-	struct mutex device_list_mutex;
 	spinlock_t lru_lock;
 
 	/**
-	 * Protected by device_list_mutex.
+	 * Protected by ttm_global_mutex.
 	 */
+	unsigned int use_count;
 	struct list_head device_list;
 
 	/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
 	 * Internal protection.
 	 */
 	atomic_t bo_count;
-};
+} ttm_bo_glob;
 
 
 #define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
 void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem);
 
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
 int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
 /**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * Returns:
  * !0: Failure.
  */
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       uint64_t file_page_offset, bool need_dma32);
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b0fdd19..621615f 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -40,13 +40,13 @@
  *
  * @head:           list head for thread-private list.
  * @bo:             refcounted buffer object pointer.
- * @shared:         should the fence be added shared?
+ * @num_shared:     how many shared fences we want to add.
  */
 
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
-	bool shared;
+	unsigned int num_shared;
 };
 
 /**
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fe..3ff48a0 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
 
 #define TTM_MEM_MAX_ZONES 2
 struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
 	struct kobject kobj;
 	struct ttm_bo_global *bo_glob;
 	struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
 #else
 	struct ttm_mem_zone *zone_dma32;
 #endif
-};
+} ttm_mem_glob;
 
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8c..999e4b1 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 	return ret < 0 ? ret : 0;
 }
 
+struct dma_fence *dma_fence_get_stub(void);
 u64 dma_fence_context_alloc(unsigned num);
 
 #define DMA_FENCE_TRACE(f, fmt, args...) \
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 4f3febc..d2bacf5 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
 int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
 ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
 				size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+				     void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
 
 enum hdmi_spd_sdi {
 	HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
 			    const char *vendor, const char *product);
 ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
 				size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+				     void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
 
 enum hdmi_audio_coding_type {
 	HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
 int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
 ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
 				  void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+				       void *buffer, size_t size);
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
 
 enum hdmi_3d_structure {
 	HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
 int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
 ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
 				   void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+					void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
 
 union hdmi_vendor_any_infoframe {
 	struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
 	struct hdmi_audio_infoframe audio;
 };
 
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+			    size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+				 void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+			  const void *buffer, size_t size);
 void hdmi_infoframe_log(const char *level, struct device *dev,
-			union hdmi_infoframe *frame);
+			const union hdmi_infoframe *frame);
 
 #endif /* _DRM_HDMI_H */
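
A sketch of the new check/pack_only split for the AVI infoframe, sizing the
buffer with the existing HDMI_INFOFRAME_SIZE() helper:

	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
	ssize_t len;

	hdmi_avi_infoframe_init(&frame);
	/* ... fill in frame fields ... */
	if (hdmi_avi_infoframe_check(&frame) == 0)
		len = hdmi_avi_infoframe_pack_only(&frame, buf, sizeof(buf));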
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e8..2f0ffca 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -68,7 +68,6 @@ struct reservation_object_list {
  * @seq: sequence count for managing RCU read-side synchronization
  * @fence_excl: the exclusive fence, if there is one currently
  * @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
  */
 struct reservation_object {
 	struct ww_mutex lock;
@@ -76,7 +75,6 @@ struct reservation_object {
 
 	struct dma_fence __rcu *fence_excl;
 	struct reservation_object_list __rcu *fence;
-	struct reservation_object_list *staged;
 };
 
 #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
@@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj)
 	__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
-	obj->staged = NULL;
 }
 
 /**
@@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj)
 
 		kfree(fobj);
 	}
-	kfree(obj->staged);
 
 	ww_mutex_destroy(&obj->lock);
 }
@@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj)
 static inline void
 reservation_object_unlock(struct reservation_object *obj)
 {
+#ifdef CONFIG_DEBUG_MUTEXES
+	/* Test shared fence slot reservation */
+	if (obj->fence)
+		obj->fence->shared_max = obj->fence->shared_count;
+#endif
 	ww_mutex_unlock(&obj->lock);
 }
 
@@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
 	return fence;
 }
 
-int reservation_object_reserve_shared(struct reservation_object *obj);
+int reservation_object_reserve_shared(struct reservation_object *obj,
+				      unsigned int num_fences);
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct dma_fence *fence);
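
With the new num_fences parameter, callers reserve all needed shared slots
up front; a sketch under the reservation lock:

	ret = reservation_object_lock(obj, NULL);
	if (!ret) {
		ret = reservation_object_reserve_shared(obj, 1);
		if (!ret)
			reservation_object_add_shared_fence(obj, fence);
		reservation_object_unlock(obj);
	}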
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8a07a4..a8f6d5d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
 
 struct bio;
 
+struct pagevec;
+
 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK	0x7fff
 #define SWAP_FLAG_PRIO_SHIFT	0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
 #endif
 
 extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa..786816c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
 				      const struct attribute *attr,
 				      const void *ns);
 int __must_check sysfs_create_files(struct kobject *kobj,
-				   const struct attribute **attr);
+				   const struct attribute * const *attr);
 int __must_check sysfs_chmod_file(struct kobject *kobj,
 				  const struct attribute *attr, umode_t mode);
 struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
 			  const void *ns);
 bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
 
 int __must_check sysfs_create_bin_file(struct kobject *kobj,
 				       const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
 }
 
 static inline int sysfs_create_files(struct kobject *kobj,
-				    const struct attribute **attr)
+				    const struct attribute * const *attr)
 {
 	return 0;
 }
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
 }
 
 static inline void sysfs_remove_files(struct kobject *kobj,
-				     const struct attribute **attr)
+				     const struct attribute * const *attr)
 {
 }
 
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 370e9a5..be84e43 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
 /* GFX9 and later: */
 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
 #define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT		5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK		0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT		29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
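
For illustration, the new DCC fields combine with the set/get helpers like
any other tiling field; swizzle and dcc_offset are hypothetical values from
the allocator:

	__u64 tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle) |
		       AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8) |
		       AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, 1);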
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0cd40eb..0b44260 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -151,6 +151,21 @@ extern "C" {
 #define DRM_FORMAT_VYUY		fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
 
 #define DRM_FORMAT_AYUV		fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888		fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0]   A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0  1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0		fourcc_code('Y', '0', 'L', '0')
+/* [63:0]   X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0  1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0		fourcc_code('X', '0', 'L', '0')
+
+/* [63:0]   A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0  1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2		fourcc_code('Y', '0', 'L', '2')
+/* [63:0]   X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0  1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2		fourcc_code('X', '0', 'L', '2')
 
 /*
  * 2 plane RGB + A
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d3e0fe3..a439c2e 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -888,6 +888,25 @@ struct drm_mode_revoke_lease {
 	__u32 lessee_id;
 };
 
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * The drm subsystem uses struct drm_rect internally to manage rectangular
+ * areas; this structure exports the same concept to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+	__s32 x1;
+	__s32 y1;
+	__s32 x2;
+	__s32 y2;
+};
+
 #if defined(__cplusplus)
 }
 #endif
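
On the user-space side a damage rect is wrapped in a property blob and
attached to the plane's FB_DAMAGE_CLIPS property; a libdrm-based sketch
where fd is an open DRM device:

	struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 256, .y2 = 256 };
	uint32_t blob_id;

	drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
	/* set FB_DAMAGE_CLIPS to blob_id in the atomic commit */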
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a4446f4..298b2e1 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
 	int irq_seq;
 } drm_i915_irq_wait_t;
 
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE	0
+#define I915_GEM_PPGTT_ALIASING	1
+#define I915_GEM_PPGTT_FULL	2
+
 /* Ioctl to query kernel params:
  */
 #define I915_PARAM_IRQ_ACTIVE            1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c06d0a5..91a16b3 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -105,14 +105,24 @@ struct drm_msm_gem_new {
 	__u32 handle;         /* out */
 };
 
-#define MSM_INFO_IOVA	0x01
-
-#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+/* Get or set GEM buffer info.  The requested value can be passed
+ * directly in 'value', or for data larger than 64b 'value' is a
+ * pointer to a userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer.  For info returned by pointer,
+ * calling the GEM_INFO ioctl with a null 'value' will return the
+ * required buffer size in 'len'.
+ */
+#define MSM_INFO_GET_OFFSET	0x00   /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA	0x01   /* get iova, returned by value */
+#define MSM_INFO_SET_NAME	0x02   /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME	0x03   /* get debug name, returned by pointer */
 
 struct drm_msm_gem_info {
 	__u32 handle;         /* in */
-	__u32 flags;	      /* in - combination of MSM_INFO_* flags */
-	__u64 offset;         /* out, mmap() offset or iova */
+	__u32 info;           /* in - one of MSM_INFO_* */
+	__u64 value;          /* in or out */
+	__u32 len;            /* in or out */
+	__u32 pad;
 };
 
 #define MSM_PREP_READ        0x01
@@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd {
  */
 #define MSM_SUBMIT_BO_READ             0x0001
 #define MSM_SUBMIT_BO_WRITE            0x0002
+#define MSM_SUBMIT_BO_DUMP             0x0004
 
-#define MSM_SUBMIT_BO_FLAGS            (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+#define MSM_SUBMIT_BO_FLAGS            (MSM_SUBMIT_BO_READ | \
+					MSM_SUBMIT_BO_WRITE | \
+					MSM_SUBMIT_BO_DUMP)
 
 struct drm_msm_gem_submit_bo {
 	__u32 flags;          /* in, mask of MSM_SUBMIT_BO_x */
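
A user-space sketch of the reworked ioctl, setting a debug name by pointer;
fd and handle are assumed to exist:

	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_SET_NAME,
		.value = (uintptr_t)"my-buffer",
		.len = strlen("my-buffer"),
	};

	ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);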
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 7b66277..35c7d81 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -36,6 +36,7 @@ extern "C" {
 #define DRM_V3D_MMAP_BO                           0x03
 #define DRM_V3D_GET_PARAM                         0x04
 #define DRM_V3D_GET_BO_OFFSET                     0x05
+#define DRM_V3D_SUBMIT_TFU                        0x06
 
 #define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
 #define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
 #define DRM_IOCTL_V3D_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
 #define DRM_IOCTL_V3D_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
 #define DRM_IOCTL_V3D_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
 
 /**
  * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -58,10 +60,15 @@ struct drm_v3d_submit_cl {
 	 * coordinate shader to determine where primitives land on the screen,
 	 * then writes out the state updates and draw calls necessary per tile
 	 * to the tile allocation BO.
+	 *
+	 * This BCL will block on any previous BCL submitted on the
+	 * same FD, but not on any RCL or BCLs submitted by other
+	 * clients -- that is left up to the submitter to control
+	 * using in_sync_bcl if necessary.
 	 */
 	__u32 bcl_start;
 
-	 /** End address of the BCL (first byte after the BCL) */
+	/** End address of the BCL (first byte after the BCL) */
 	__u32 bcl_end;
 
 	/* Offset of the render command list.
@@ -69,10 +76,15 @@ struct drm_v3d_submit_cl {
 	 * This is the second set of commands executed, which will either
 	 * execute the tiles that have been set up by the BCL, or a fixed set
 	 * of tiles (in the case of RCL-only blits).
+	 *
+	 * This RCL will block on this submit's BCL, and any previous
+	 * RCL submitted on the same FD, but not on any RCL or BCLs
+	 * submitted by other clients -- that is left up to the
+	 * submitter to control using in_sync_rcl if necessary.
 	 */
 	__u32 rcl_start;
 
-	 /** End address of the RCL (first byte after the RCL) */
+	/** End address of the RCL (first byte after the RCL) */
 	__u32 rcl_end;
 
 	/** An optional sync object to wait on before starting the BCL. */
@@ -169,6 +181,7 @@ enum drm_v3d_param {
 	DRM_V3D_PARAM_V3D_CORE0_IDENT0,
 	DRM_V3D_PARAM_V3D_CORE0_IDENT1,
 	DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+	DRM_V3D_PARAM_SUPPORTS_TFU,
 };
 
 struct drm_v3d_get_param {
@@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset {
 	__u32 offset;
 };
 
+struct drm_v3d_submit_tfu {
+	__u32 icfg;
+	__u32 iia;
+	__u32 iis;
+	__u32 ica;
+	__u32 iua;
+	__u32 ioa;
+	__u32 ios;
+	__u32 coef[4];
+	/* The first handle is the output BO; the following handles are
+	 * inputs.  0 marks an unused slot.
+	 */
+	__u32 bo_handles[4];
+	/* Sync object to block on before running the TFU job.  Each TFU
+	 * job will execute in the order submitted to its FD.  Synchronization
+	 * against rendering jobs requires using sync objects.
+	 */
+	__u32 in_sync;
+	/* Sync object to signal when the TFU job is done. */
+	__u32 out_sync;
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 9a781f0..f06a789 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,6 +47,13 @@ extern "C" {
 #define DRM_VIRTGPU_WAIT     0x08
 #define DRM_VIRTGPU_GET_CAPS  0x09
 
+#define VIRTGPU_EXECBUF_FENCE_FD_IN	0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT	0x02
+#define VIRTGPU_EXECBUF_FLAGS  (\
+		VIRTGPU_EXECBUF_FENCE_FD_IN |\
+		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+		0)
+
 struct drm_virtgpu_map {
 	__u64 offset; /* use for mmap system call */
 	__u32 handle;
@@ -54,12 +61,12 @@ struct drm_virtgpu_map {
 };
 
 struct drm_virtgpu_execbuffer {
-	__u32		flags;		/* for future use */
+	__u32 flags;
 	__u32 size;
 	__u64 command; /* void* */
 	__u64 bo_handles;
 	__u32 num_bo_handles;
-	__u32 pad;
+	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps {
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
 #define DRM_IOCTL_VIRTGPU_EXECBUFFER \
-	DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
 		struct drm_virtgpu_execbuffer)
 
 #define DRM_IOCTL_VIRTGPU_GETPARAM \
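
A user-space sketch of requesting an out-fence with the new flags; fd, cmd
and cmd_size are assumed to exist:

	struct drm_virtgpu_execbuffer eb = {
		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
		.size = cmd_size,
		.command = (uintptr_t)cmd,
		.fence_fd = -1,
	};

	ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	/* eb.fence_fd now holds a fence fd to wait on or pass along */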
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b01eb50..e622fd1f 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 	__u32 n_success;		/* to/from KFD */
 };
 
+struct kfd_ioctl_get_dmabuf_info_args {
+	__u64 size;		/* from KFD */
+	__u64 metadata_ptr;	/* to KFD */
+	__u32 metadata_size;	/* to KFD (space allocated by user)
+				 * from KFD (actual metadata size)
+				 */
+	__u32 gpu_id;	/* from KFD */
+	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+	__u32 dmabuf_fd;	/* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+	__u64 va_addr;	/* to KFD */
+	__u64 handle;	/* from KFD */
+	__u32 gpu_id;	/* to KFD */
+	__u32 dmabuf_fd;	/* to KFD */
+};
+
 #define AMDKFD_IOCTL_BASE 'K'
 #define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
 #define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 #define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
 		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
 
+#define AMDKFD_IOC_GET_DMABUF_INFO		\
+		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF		\
+		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
 #define AMDKFD_COMMAND_START		0x01
-#define AMDKFD_COMMAND_END		0x1C
+#define AMDKFD_COMMAND_END		0x1E
 
 #endif
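
A user-space sketch of the new import path, following the to/from KFD
annotations above; kfd_fd, va, gpu_id and dmabuf_fd are assumed to exist:

	struct kfd_ioctl_import_dmabuf_args args = {
		.va_addr = va,
		.gpu_id = gpu_id,
		.dmabuf_fd = dmabuf_fd,
	};

	ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args);
	/* args.handle now refers to the imported buffer */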
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index f43c3c6..8e88eba 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -41,6 +41,7 @@
 #include <linux/types.h>
 
 #define VIRTIO_GPU_F_VIRGL 0
+#define VIRTIO_GPU_F_EDID  1
 
 enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
 	VIRTIO_GPU_CMD_GET_CAPSET_INFO,
 	VIRTIO_GPU_CMD_GET_CAPSET,
+	VIRTIO_GPU_CMD_GET_EDID,
 
 	/* 3d commands */
 	VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
 	VIRTIO_GPU_RESP_OK_CAPSET_INFO,
 	VIRTIO_GPU_RESP_OK_CAPSET,
+	VIRTIO_GPU_RESP_OK_EDID,
 
 	/* error responses */
 	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset {
 	__u8 capset_data[];
 };
 
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 scanout;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 size;
+	__le32 padding;
+	__u8 edid[1024];
+};
+
 #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
 
 struct virtio_gpu_config {
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index abbad94..e582e8e 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -246,6 +246,9 @@ struct ipu_image {
 	struct v4l2_rect rect;
 	dma_addr_t phys0;
 	dma_addr_t phys1;
+	/* chroma plane offset overrides */
+	u32 u_offset;
+	u32 v_offset;
 };
 
 void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic,
 		     int out_width, int out_height,
 		     enum ipu_color_space in_cs,
 		     enum ipu_color_space out_cs);
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+			 int in_width, int in_height,
+			 int out_width, int out_height,
+			 enum ipu_color_space in_cs,
+			 enum ipu_color_space out_cs,
+			 u32 rsc);
 int ipu_ic_task_graphics_init(struct ipu_ic *ic,
 			      enum ipu_color_space in_g_cs,
 			      bool galpha_en, u32 galpha,
diff --git a/mm/shmem.c b/mm/shmem.c
index 5d07e0b..375f3ac 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -758,7 +758,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 			break;
 		index = indices[pvec.nr - 1] + 1;
 		pagevec_remove_exceptionals(&pvec);
-		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		check_move_unevictable_pages(&pvec);
 		pagevec_release(&pvec);
 		cond_resched();
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62ac0c48..24ab1f7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
 	return ret;
 }
 
-#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages:	array of pages to check
- * @nr_pages:	number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
  *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability; if an evictable page is in the unevictable
+ * lru list, it is moved to the appropriate evictable lru list. This function
+ * should only be used for lru pages.
  */
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
 {
 	struct lruvec *lruvec;
 	struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	int pgrescued = 0;
 	int i;
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pages[i];
+	for (i = 0; i < pvec->nr; i++) {
+		struct page *page = pvec->pages[i];
 		struct pglist_data *pagepgdat = page_pgdat(page);
 
 		pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		spin_unlock_irq(&pgdat->lru_lock);
 	}
 }
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
deleted file mode 100644
index 3a09c97..0000000
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-///
-/// Use drm_*_get() and drm_*_put() helpers instead of drm_*_reference() and
-/// drm_*_unreference() helpers.
-///
-// Confidence: High
-// Copyright: (C) 2017 NVIDIA Corporation
-// Options: --no-includes --include-headers
-//
-
-virtual patch
-virtual report
-
-@depends on patch@
-expression object;
-@@
-
-(
-- drm_connector_reference(object)
-+ drm_connector_get(object)
-|
-- drm_connector_unreference(object)
-+ drm_connector_put(object)
-|
-- drm_framebuffer_reference(object)
-+ drm_framebuffer_get(object)
-|
-- drm_framebuffer_unreference(object)
-+ drm_framebuffer_put(object)
-|
-- drm_gem_object_reference(object)
-+ drm_gem_object_get(object)
-|
-- drm_gem_object_unreference(object)
-+ drm_gem_object_put(object)
-|
-- __drm_gem_object_unreference(object)
-+ __drm_gem_object_put(object)
-|
-- drm_gem_object_unreference_unlocked(object)
-+ drm_gem_object_put_unlocked(object)
-|
-- drm_dev_unref(object)
-+ drm_dev_put(object)
-)
-
-@r depends on report@
-expression object;
-position p;
-@@
-
-(
-drm_connector_unreference@p(object)
-|
-drm_connector_reference@p(object)
-|
-drm_framebuffer_unreference@p(object)
-|
-drm_framebuffer_reference@p(object)
-|
-drm_gem_object_unreference@p(object)
-|
-drm_gem_object_reference@p(object)
-|
-__drm_gem_object_unreference(object)
-|
-drm_gem_object_unreference_unlocked(object)
-|
-drm_dev_unref@p(object)
-)
-
-@script:python depends on report@
-object << r.object;
-p << r.p;
-@@
-
-msg="WARNING: use get/put helpers to reference and dereference %s" % (object)
-coccilib.report.print_report(p[0], msg)
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 83d76c3..00c92eb 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1648,7 +1648,7 @@ static int had_create_jack(struct snd_intelhad *ctx,
  * PM callbacks
  */
 
-static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
+static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
 {
 	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
 	int port;
@@ -1664,23 +1664,8 @@ static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
 		}
 	}
 
-	return 0;
-}
+	snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
 
-static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
-{
-	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
-	int err;
-
-	err = hdmi_lpe_audio_runtime_suspend(dev);
-	if (!err)
-		snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
-	return err;
-}
-
-static int hdmi_lpe_audio_runtime_resume(struct device *dev)
-{
-	pm_runtime_mark_last_busy(dev);
 	return 0;
 }
 
@@ -1688,8 +1673,10 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
 {
 	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
 
-	hdmi_lpe_audio_runtime_resume(dev);
+	pm_runtime_mark_last_busy(dev);
+
 	snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
+
 	return 0;
 }
 
@@ -1877,7 +1864,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_mark_last_busy(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
 
 	dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
 	for_each_port(card_ctx, port) {
@@ -1908,8 +1894,6 @@ static int hdmi_lpe_audio_remove(struct platform_device *pdev)
 
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
-	SET_RUNTIME_PM_OPS(hdmi_lpe_audio_runtime_suspend,
-			   hdmi_lpe_audio_runtime_resume, NULL)
 };
 
 static struct platform_driver hdmi_lpe_audio_driver = {